// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>
//
// A malloc that uses a per-thread cache to satisfy small malloc requests.
// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
//
// See doc/tcmalloc.html for a high-level
// description of how this malloc works.
//
// SYNCHRONIZATION
//  1. The thread-specific lists are accessed without acquiring any locks.
//     This is safe because each such list is only accessed by one thread.
//  2. We have a lock per central free-list, and hold it while manipulating
//     the central free list for a particular size.
//  3. The central page allocator is protected by "pageheap_lock".
//  4. The pagemap (which maps from page-number to descriptor)
//     can be read without holding any locks, and written while holding
//     the "pageheap_lock".
//  5. To improve performance, a subset of the information one can get
//     from the pagemap is cached in a data structure, pagemap_cache_,
//     that atomically reads and writes its entries.  This cache can be
//     read and written without locking.
//
//     This multi-threaded access to the pagemap is safe for fairly
//     subtle reasons.  We basically assume that when an object X is
//     allocated by thread A and deallocated by thread B, there must
//     have been appropriate synchronization in the handoff of object
//     X from thread A to thread B.  The same logic applies to pagemap_cache_.
//
// THE PAGEID-TO-SIZECLASS CACHE
// Hot PageID-to-sizeclass mappings are held by pagemap_cache_.  If this cache
// returns 0 for a particular PageID then that means "no information," not that
// the sizeclass is 0.  The cache may have stale information for pages that do
// not hold the beginning of any free()'able object.  Staleness is eliminated
// in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
// do_memalign() for all other relevant pages.
//
// PAGEMAP
// -------
// Page map contains a mapping from page id to Span.
//
// If Span s occupies pages [p..q],
//      pagemap[p] == s
//      pagemap[q] == s
//      pagemap[p+1..q-1] are undefined
//      pagemap[p-1] and pagemap[q+1] are defined:
//         NULL if the corresponding page is not yet in the address space.
//         Otherwise it points to a Span.  This span may be free
//         or allocated.  If free, it is in one of pageheap's freelists.
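//
// For example, if a Span s occupies the three pages [p..p+2], then
//      pagemap[p] == s and pagemap[p+2] == s, while pagemap[p+1] is undefined.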
//
// TODO: Bias reclamation to larger addresses
// TODO: implement mallinfo/mallopt
// TODO: Better testing
//
// 9/28/2003 (new page-level allocator replaces ptmalloc2):
// * malloc/free of small objects goes from ~300 ns to ~50 ns.
// * allocation of a reasonably complicated struct
//   goes from about 1100 ns to about 300 ns.

#include "config.h"
#include <gperftools/tcmalloc.h>

#include <errno.h>                      // for ENOMEM, EINVAL, errno
#ifdef HAVE_SYS_CDEFS_H
#include <sys/cdefs.h>                  // for __THROW
#endif
#if defined HAVE_STDINT_H
#include <stdint.h>
#elif defined HAVE_INTTYPES_H
#include <inttypes.h>
#else
#include <sys/types.h>
#endif
#include <stddef.h>                     // for size_t, NULL
#include <stdlib.h>                     // for getenv
#include <string.h>                     // for strcmp, memset, strlen, etc
#ifdef HAVE_UNISTD_H
#include <unistd.h>                     // for getpagesize, write, etc
#endif
#include <algorithm>                    // for max, min
#include <limits>                       // for numeric_limits
#include <new>                          // for nothrow_t (ptr only), etc
#include <vector>                       // for vector

#include <gperftools/malloc_extension.h>
#include <gperftools/malloc_hook.h>         // for MallocHook
#include "base/basictypes.h"            // for int64
#include "base/commandlineflags.h"      // for RegisterFlagValidator, etc
#include "base/dynamic_annotations.h"   // for RunningOnValgrind
#include "base/spinlock.h"              // for SpinLockHolder
#include "central_freelist.h"  // for CentralFreeListPadded
#include "common.h"            // for StackTrace, kPageShift, etc
#include "free_list.h"         // for FL_Init
#include "internal_logging.h"  // for ASSERT, TCMalloc_Printer, etc
#include "malloc_hook-inl.h"       // for MallocHook::InvokeNewHook, etc
#include "page_heap.h"         // for PageHeap, PageHeap::Stats
#include "page_heap_allocator.h"  // for PageHeapAllocator
#include "span.h"              // for Span, DLL_Prepend, etc
#include "stack_trace_table.h"  // for StackTraceTable
#include "static_vars.h"       // for Static
#include "system-alloc.h"      // for DumpSystemAllocatorStats, etc
#include "tcmalloc_guard.h"    // for TCMallocGuard
#include "thread_cache.h"      // for ThreadCache

#if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS)
# define WIN32_DO_PATCHING 1
#endif

// Some windows file somewhere (at least on cygwin) #define's small (!)
// For instance, <windows.h> appears to have "#define small char".
#undef small

using STL_NAMESPACE::max;
using STL_NAMESPACE::min;
using STL_NAMESPACE::numeric_limits;
using STL_NAMESPACE::vector;

#include "libc_override.h"

// __THROW is defined in glibc (via <sys/cdefs.h>).  It means,
// counter-intuitively, "This function will never throw an exception."
// It's an optional optimization tool, but we may need to use it to
// match glibc prototypes.
#ifndef __THROW    // I guess we're not on a glibc system
# define __THROW   // __THROW is just an optimization, so ok to make it ""
#endif

using tcmalloc::AlignmentForSize;
using tcmalloc::kLog;
using tcmalloc::kCrash;
using tcmalloc::kCrashWithStats;
using tcmalloc::Log;
using tcmalloc::PageHeap;
using tcmalloc::PageHeapAllocator;
using tcmalloc::SizeMap;
using tcmalloc::Span;
using tcmalloc::StackTrace;
using tcmalloc::Static;
using tcmalloc::ThreadCache;

// ---- Functions doing validation with an extra mark.
static size_t ExcludeSpaceForMark(size_t size);
static void AddRoomForMark(size_t* size);
static void ExcludeMarkFromSize(size_t* new_size);
static void MarkAllocatedRegion(void* ptr);
static void ValidateAllocatedRegion(void* ptr, size_t cl);
// ---- End validation functions.

DECLARE_int64(tcmalloc_sample_parameter);
DECLARE_double(tcmalloc_release_rate);

// For windows, the printf we use to report large allocs is
// potentially dangerous: it could cause a malloc that would cause an
// infinite loop.  So by default we set the threshold to a huge number
// on windows, so this bad situation will never trigger.  You can
// always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you
// want this functionality.
#ifdef _WIN32
const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 62;
#else
const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 30;
#endif
DEFINE_int64(tcmalloc_large_alloc_report_threshold,
             EnvToInt64("TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD",
                        kDefaultLargeAllocReportThreshold),
             "Allocations larger than this value cause a stack "
             "trace to be dumped to stderr.  The threshold for "
             "dumping stack traces is increased by a factor of 1.125 "
             "every time we print a message so that the threshold "
             "automatically goes up by a factor of ~1000 every 60 "
             "messages.  This bounds the amount of extra logging "
             "generated by this flag.  Default value of this flag "
             "is very large and therefore you should see no extra "
             "logging unless the flag is overridden.  Set to 0 to "
             "disable reporting entirely.");
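// (For reference, 1.125^60 is about 1170, which is where the "~1000 every 60
// messages" figure in the flag description above comes from.)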


// We already declared these functions in tcmalloc.h, but we have to
// declare them again to give them an ATTRIBUTE_SECTION: we want to
// put all callers of MallocHook::Invoke* in this module into
// ATTRIBUTE_SECTION(google_malloc) section, so that
// MallocHook::GetCallerStackTrace can function accurately.
extern "C" {
  void* tc_malloc(size_t size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void tc_free(void* ptr) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_realloc(void* ptr, size_t size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_calloc(size_t nmemb, size_t size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void tc_cfree(void* ptr) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  void* tc_memalign(size_t __alignment, size_t __size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  int tc_posix_memalign(void** ptr, size_t align, size_t size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_valloc(size_t __size) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_pvalloc(size_t __size) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  void tc_malloc_stats(void) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  int tc_mallopt(int cmd, int value) __THROW
      ATTRIBUTE_SECTION(google_malloc);
#ifdef HAVE_STRUCT_MALLINFO
  struct mallinfo tc_mallinfo(void) __THROW
      ATTRIBUTE_SECTION(google_malloc);
#endif

  void* tc_new(size_t size)
      ATTRIBUTE_SECTION(google_malloc);
  void tc_delete(void* p) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_newarray(size_t size)
      ATTRIBUTE_SECTION(google_malloc);
  void tc_deletearray(void* p) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  // And the nothrow variants of these:
  void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  // Surprisingly, standard C++ library implementations use a
  // nothrow-delete internally.  See, eg:
  // http://www.dinkumware.com/manuals/?manual=compleat&page=new.html
  void tc_delete_nothrow(void* ptr, const std::nothrow_t&) __THROW
      ATTRIBUTE_SECTION(google_malloc);
  void tc_deletearray_nothrow(void* ptr, const std::nothrow_t&) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  // Some non-standard extensions that we support.

  // This is equivalent to
  //    OS X: malloc_size()
  //    glibc: malloc_usable_size()
  //    Windows: _msize()
  size_t tc_malloc_size(void* p) __THROW
      ATTRIBUTE_SECTION(google_malloc);

  void* tc_malloc_skip_new_handler(size_t size)
      ATTRIBUTE_SECTION(google_malloc);
}  // extern "C"


// ----------------------- IMPLEMENTATION -------------------------------

static int tc_new_mode = 0;  // See tc_set_new_mode().

// Routines such as free() and realloc() catch some erroneous pointers
// passed to them, and invoke the below when they do.  (An erroneous pointer
// won't be caught if it's within a valid span or a stale span for which
// the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing
// required) kind of exception handling for these routines.
namespace {
void InvalidFree(void* ptr) {
  Log(kCrash, __FILE__, __LINE__, "Attempt to free invalid pointer", ptr);
}

size_t InvalidGetSizeForRealloc(const void* old_ptr) {
  Log(kCrash, __FILE__, __LINE__,
      "Attempt to realloc invalid pointer", old_ptr);
  return 0;
}

size_t InvalidGetAllocatedSize(const void* ptr) {
  Log(kCrash, __FILE__, __LINE__,
      "Attempt to get the size of an invalid pointer", ptr);
  return 0;
}

// For security reasons, we want to limit the size of allocations.
// See crbug.com/169327.
inline bool IsAllocSizePermitted(size_t alloc_size) {
  // Never allow an allocation larger than what can be indexed via an int.
  // Remove kPageSize to account for various rounding, padding and to have a
  // small margin.
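  // For example, with 4 KiB pages (an assumption; kPageSize depends on the
  // build's kPageShift), this permits allocations of up to INT_MAX - 4096
  // bytes, i.e. just under 2 GiB.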
  return alloc_size <= ((std::numeric_limits<int>::max)() - kPageSize);
}

}  // unnamed namespace

// Extract interesting stats
struct TCMallocStats {
  uint64_t thread_bytes;      // Bytes in thread caches
  uint64_t central_bytes;     // Bytes in central cache
  uint64_t transfer_bytes;    // Bytes in central transfer cache
  uint64_t metadata_bytes;    // Bytes alloced for metadata
  uint64_t metadata_unmapped_bytes;    // Address space reserved for metadata
                                       // but is not committed.
  PageHeap::Stats pageheap;   // Stats from page heap
};

// Get stats into "r".  Also get per-size-class counts if class_count != NULL
static void ExtractStats(TCMallocStats* r, uint64_t* class_count,
                         PageHeap::SmallSpanStats* small_spans,
                         PageHeap::LargeSpanStats* large_spans) {
  r->central_bytes = 0;
  r->transfer_bytes = 0;
  for (int cl = 0; cl < kNumClasses; ++cl) {
    const int length = Static::central_cache()[cl].length();
    const int tc_length = Static::central_cache()[cl].tc_length();
    const size_t cache_overhead = Static::central_cache()[cl].OverheadBytes();
    const size_t size = static_cast<uint64_t>(
        Static::sizemap()->ByteSizeForClass(cl));
    r->central_bytes += (size * length) + cache_overhead;
    r->transfer_bytes += (size * tc_length);
    if (class_count) class_count[cl] = length + tc_length;
  }

  // Add stats from per-thread heaps
  r->thread_bytes = 0;
  { // scope
    SpinLockHolder h(Static::pageheap_lock());
    ThreadCache::GetThreadStats(&r->thread_bytes, class_count);
    r->metadata_bytes = tcmalloc::metadata_system_bytes();
    r->metadata_unmapped_bytes = tcmalloc::metadata_unmapped_bytes();
    r->pageheap = Static::pageheap()->stats();
    if (small_spans != NULL) {
      Static::pageheap()->GetSmallSpanStats(small_spans);
    }
    if (large_spans != NULL) {
      Static::pageheap()->GetLargeSpanStats(large_spans);
    }
  }
}

static double PagesToMiB(uint64_t pages) {
  return (pages << kPageShift) / 1048576.0;
}

// WRITE stats to "out"
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  PageHeap::SmallSpanStats small;
  PageHeap::LargeSpanStats large;
  if (level >= 2) {
    ExtractStats(&stats, class_count, &small, &large);
  } else {
    ExtractStats(&stats, NULL, NULL, NULL);
  }

  static const double MiB = 1048576.0;

  const uint64_t physical_memory_used_by_metadata =
      stats.metadata_bytes - stats.metadata_unmapped_bytes;
  const uint64_t unmapped_bytes =
      stats.pageheap.unmapped_bytes + stats.metadata_unmapped_bytes;

  const uint64_t virtual_memory_used = (stats.pageheap.system_bytes
                                        + stats.metadata_bytes);
  const uint64_t physical_memory_used = virtual_memory_used - unmapped_bytes;
  const uint64_t bytes_in_use_by_app = (physical_memory_used
                                        - physical_memory_used_by_metadata
                                        - stats.pageheap.free_bytes
                                        - stats.central_bytes
                                        - stats.transfer_bytes
                                        - stats.thread_bytes);

  out->printf(
      "WASTE:   %7.1f MiB bytes in use\n"
      "WASTE: + %7.1f MiB committed but not used\n"
      "WASTE:   ------------\n"
      "WASTE: = %7.1f MiB bytes committed\n"
      "WASTE: committed/used ratio of %f\n",
      bytes_in_use_by_app / MiB,
      (stats.pageheap.committed_bytes - bytes_in_use_by_app) / MiB,
      stats.pageheap.committed_bytes / MiB,
      stats.pageheap.committed_bytes / static_cast<double>(bytes_in_use_by_app)
      );
#ifdef TCMALLOC_SMALL_BUT_SLOW
  out->printf(
      "NOTE:  SMALL MEMORY MODEL IS IN USE, PERFORMANCE MAY SUFFER.\n");
#endif
  out->printf(
      "------------------------------------------------\n"
      "MALLOC:   %12" PRIu64 " (%7.1f MiB) Bytes in use by application\n"
      "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in page heap freelist\n"
      "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in central cache freelist\n"
      "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in transfer cache freelist\n"
      "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in thread cache freelists\n"
      "MALLOC:   ------------\n"
      "MALLOC: = %12" PRIu64 " (%7.1f MiB) Bytes committed\n"
      "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in malloc metadata\n"
      "MALLOC:   ------------\n"
      "MALLOC: = %12" PRIu64 " (%7.1f MiB) Actual memory used (physical + swap)\n"
      "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes released to OS (aka unmapped)\n"
      "MALLOC:   ------------\n"
      "MALLOC: = %12" PRIu64 " (%7.1f MiB) Virtual address space used\n"
      "MALLOC:\n"
      "MALLOC:   %12" PRIu64 "              Spans in use\n"
      "MALLOC:   %12" PRIu64 "              Thread heaps in use\n"
      "MALLOC:   %12" PRIu64 "              Tcmalloc page size\n"
      "------------------------------------------------\n"
      "Call ReleaseFreeMemory() to release freelist memory to the OS"
      " (via madvise()).\n"
      "Bytes released to the OS take up virtual address space"
      " but no physical memory.\n",
      bytes_in_use_by_app, bytes_in_use_by_app / MiB,
      stats.pageheap.free_bytes, stats.pageheap.free_bytes / MiB,
      stats.central_bytes, stats.central_bytes / MiB,
      stats.transfer_bytes, stats.transfer_bytes / MiB,
      stats.thread_bytes, stats.thread_bytes / MiB,
      stats.pageheap.committed_bytes, stats.pageheap.committed_bytes / MiB,
      physical_memory_used_by_metadata, physical_memory_used_by_metadata / MiB,
      physical_memory_used, physical_memory_used / MiB,
      unmapped_bytes, unmapped_bytes / MiB,
      virtual_memory_used, virtual_memory_used / MiB,
      uint64_t(Static::span_allocator()->inuse()),
      uint64_t(ThreadCache::HeapsInUse()),
      uint64_t(kPageSize));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    out->printf("Size class breakdown\n");
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes =
            class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                "%8" PRIu64 " objs; %5.1f MiB; %5.1f cum MiB\n",
                cl, Static::sizemap()->ByteSizeForClass(cl),
                class_count[cl],
                class_bytes / MiB,
                cumulative / MiB);
      }
    }

    // append page heap info
    int nonempty_sizes = 0;
    for (int s = 0; s < kMaxPages; s++) {
      if (small.normal_length[s] + small.returned_length[s] > 0) {
        nonempty_sizes++;
      }
    }
    out->printf("------------------------------------------------\n");
    out->printf("PageHeap: %d sizes; %6.1f MiB free; %6.1f MiB unmapped\n",
                nonempty_sizes, stats.pageheap.free_bytes / MiB,
                stats.pageheap.unmapped_bytes / MiB);
    out->printf("------------------------------------------------\n");
    uint64_t total_normal = 0;
    uint64_t total_returned = 0;
    for (int s = 0; s < kMaxPages; s++) {
      const int n_length = small.normal_length[s];
      const int r_length = small.returned_length[s];
      if (n_length + r_length > 0) {
        uint64_t n_pages = s * n_length;
        uint64_t r_pages = s * r_length;
        total_normal += n_pages;
        total_returned += r_pages;
        out->printf("%6u pages * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
                    "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
                    s,
                    (n_length + r_length),
                    PagesToMiB(n_pages + r_pages),
                    PagesToMiB(total_normal + total_returned),
                    PagesToMiB(r_pages),
                    PagesToMiB(total_returned));
      }
    }

    total_normal += large.normal_pages;
    total_returned += large.returned_pages;
    out->printf(">255   large * %6u spans ~ %6.1f MiB; %6.1f MiB cum"
                "; unmapped: %6.1f MiB; %6.1f MiB cum\n",
                static_cast<unsigned int>(large.spans),
                PagesToMiB(large.normal_pages + large.returned_pages),
                PagesToMiB(total_normal + total_returned),
                PagesToMiB(large.returned_pages),
                PagesToMiB(total_returned));
  }
}

static void PrintStats(int level) {
  const int kBufferSize = 16 << 10;
  char* buffer = new char[kBufferSize];
  TCMalloc_Printer printer(buffer, kBufferSize);
  DumpStats(&printer, level);
  write(STDERR_FILENO, buffer, strlen(buffer));
  delete[] buffer;
}

static void** DumpHeapGrowthStackTraces() {
  // Count how much space we need
  int needed_slots = 0;
  {
    SpinLockHolder h(Static::pageheap_lock());
    for (StackTrace* t = Static::growth_stacks();
         t != NULL;
         t = reinterpret_cast<StackTrace*>(
             t->stack[tcmalloc::kMaxStackDepth-1])) {
      needed_slots += 3 + t->depth;
    }
    needed_slots += 100;            // Slop in case list grows
    needed_slots += needed_slots/8; // An extra 12.5% slop
  }

  void** result = new void*[needed_slots];
  if (result == NULL) {
    Log(kLog, __FILE__, __LINE__,
        "tcmalloc: allocation failed for stack trace slots",
        needed_slots * sizeof(*result));
    return NULL;
  }

  SpinLockHolder h(Static::pageheap_lock());
  int used_slots = 0;
  for (StackTrace* t = Static::growth_stacks();
       t != NULL;
       t = reinterpret_cast<StackTrace*>(
           t->stack[tcmalloc::kMaxStackDepth-1])) {
    ASSERT(used_slots < needed_slots);  // Need to leave room for terminator
    if (used_slots + 3 + t->depth >= needed_slots) {
      // No more room
      break;
    }

    result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
    result[used_slots+1] = reinterpret_cast<void*>(t->size);
    result[used_slots+2] = reinterpret_cast<void*>(t->depth);
    for (int d = 0; d < t->depth; d++) {
      result[used_slots+3+d] = t->stack[d];
    }
    used_slots += 3 + t->depth;
  }
  result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
  return result;
}

static void IterateOverRanges(void* arg, MallocExtension::RangeFunction func) {
  PageID page = 1;  // Some code may assume that page==0 is never used
  bool done = false;
  while (!done) {
    // Accumulate a small number of ranges in a local buffer
    static const int kNumRanges = 16;
    static base::MallocRange ranges[kNumRanges];
    int n = 0;
    {
      SpinLockHolder h(Static::pageheap_lock());
      while (n < kNumRanges) {
        if (!Static::pageheap()->GetNextRange(page, &ranges[n])) {
          done = true;
          break;
        } else {
          uintptr_t limit = ranges[n].address + ranges[n].length;
          page = (limit + kPageSize - 1) >> kPageShift;
          n++;
        }
      }
    }

    for (int i = 0; i < n; i++) {
      (*func)(arg, &ranges[i]);
    }
  }
}

// TCMalloc's support for extra malloc interfaces
class TCMallocImplementation : public MallocExtension {
 private:
  // ReleaseToSystem() might release more than the requested bytes because
  // the page heap releases at the span granularity, and spans are of wildly
  // different sizes.  This member keeps track of the extra bytes
  // released so that the app can periodically call ReleaseToSystem() to
  // release memory at a constant rate.
  // NOTE: Protected by Static::pageheap_lock().
  size_t extra_bytes_released_;

 public:
  TCMallocImplementation()
      : extra_bytes_released_(0) {
  }

  virtual void GetStats(char* buffer, int buffer_length) {
    ASSERT(buffer_length > 0);
    TCMalloc_Printer printer(buffer, buffer_length);

    // Print level one stats unless lots of space is available
    if (buffer_length < 10000) {
      DumpStats(&printer, 1);
    } else {
      DumpStats(&printer, 2);
    }
  }

  // We may print an extra, tcmalloc-specific warning message here.
  virtual void GetHeapSample(MallocExtensionWriter* writer) {
    if (FLAGS_tcmalloc_sample_parameter == 0) {
      const char* const kWarningMsg =
          "%warn\n"
          "%warn This heap profile does not have any data in it, because\n"
          "%warn the application was run with heap sampling turned off.\n"
          "%warn To get useful data from GetHeapSample(), you must\n"
          "%warn set the environment variable TCMALLOC_SAMPLE_PARAMETER to\n"
          "%warn a positive sampling period, such as 524288.\n"
          "%warn\n";
      writer->append(kWarningMsg, strlen(kWarningMsg));
    }
    MallocExtension::GetHeapSample(writer);
  }

  virtual void** ReadStackTraces(int* sample_period) {
    tcmalloc::StackTraceTable table;
    {
      SpinLockHolder h(Static::pageheap_lock());
      Span* sampled = Static::sampled_objects();
      for (Span* s = sampled->next; s != sampled; s = s->next) {
        table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects));
      }
    }
    *sample_period = ThreadCache::GetCache()->GetSamplePeriod();
    return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock
  }

  virtual void** ReadHeapGrowthStackTraces() {
    return DumpHeapGrowthStackTraces();
  }

  virtual void Ranges(void* arg, RangeFunction func) {
    IterateOverRanges(arg, func);
  }

  virtual bool GetNumericProperty(const char* name, size_t* value) {
    ASSERT(name != NULL);

    if (strcmp(name, "generic.current_allocated_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      *value = stats.pageheap.system_bytes
               - stats.thread_bytes
               - stats.central_bytes
               - stats.transfer_bytes
               - stats.pageheap.free_bytes
               - stats.pageheap.unmapped_bytes;
      return true;
    }

    if (strcmp(name, "generic.heap_size") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      *value = stats.pageheap.system_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
      // Kept for backwards compatibility.  Now defined externally as:
      //    pageheap_free_bytes + pageheap_unmapped_bytes.
      SpinLockHolder l(Static::pageheap_lock());
      PageHeap::Stats stats = Static::pageheap()->stats();
      *value = stats.free_bytes + stats.unmapped_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.pageheap_free_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      *value = Static::pageheap()->stats().free_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.pageheap_unmapped_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      *value = Static::pageheap()->stats().unmapped_bytes;
      return true;
    }

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      *value = ThreadCache::overall_thread_cache_size();
      return true;
    }

    if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
      TCMallocStats stats;
      ExtractStats(&stats, NULL, NULL, NULL);
      *value = stats.thread_bytes;
      return true;
    }

    return false;
  }

  virtual bool SetNumericProperty(const char* name, size_t value) {
    ASSERT(name != NULL);

    if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
      SpinLockHolder l(Static::pageheap_lock());
      ThreadCache::set_overall_thread_cache_size(value);
      return true;
    }

    return false;
  }

  virtual void MarkThreadIdle() {
    ThreadCache::BecomeIdle();
  }

  virtual void MarkThreadBusy();  // Implemented below

  virtual SysAllocator* GetSystemAllocator() {
    SpinLockHolder h(Static::pageheap_lock());
    return sys_alloc;
  }

  virtual void SetSystemAllocator(SysAllocator* alloc) {
    SpinLockHolder h(Static::pageheap_lock());
    sys_alloc = alloc;
  }

  virtual void ReleaseToSystem(size_t num_bytes) {
    SpinLockHolder h(Static::pageheap_lock());
    if (num_bytes <= extra_bytes_released_) {
      // We released too much on a prior call, so don't release any
      // more this time.
      extra_bytes_released_ = extra_bytes_released_ - num_bytes;
      return;
    }
    num_bytes = num_bytes - extra_bytes_released_;
    // num_bytes might be less than one page.  If we pass zero to
    // ReleaseAtLeastNPages, it won't do anything, so we release a whole
    // page now and let extra_bytes_released_ smooth it out over time.
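    // For example, with 4 KiB pages (an assumption; the page size depends on
    // kPageShift), ReleaseToSystem(1000) releases one whole page and records
    // 3096 extra bytes, so the next ReleaseToSystem(1000) is satisfied from
    // that credit without releasing any more pages.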
    Length num_pages = max<Length>(num_bytes >> kPageShift, 1);
    size_t bytes_released = Static::pageheap()->ReleaseAtLeastNPages(
        num_pages) << kPageShift;
    if (bytes_released > num_bytes) {
      extra_bytes_released_ = bytes_released - num_bytes;
    } else {
      // The PageHeap wasn't able to release num_bytes.  Don't try to
      // compensate with a big release next time.  Specifically,
      // ReleaseFreeMemory() calls ReleaseToSystem(LONG_MAX).
      extra_bytes_released_ = 0;
    }
  }

  virtual void SetMemoryReleaseRate(double rate) {
    FLAGS_tcmalloc_release_rate = rate;
  }

  virtual double GetMemoryReleaseRate() {
    return FLAGS_tcmalloc_release_rate;
  }
  virtual size_t GetEstimatedAllocatedSize(size_t size) {
    if (size <= kMaxSize) {
      const size_t cl = Static::sizemap()->SizeClass(size);
      const size_t alloc_size = Static::sizemap()->ByteSizeForClass(cl);
      return alloc_size;
    } else {
      return tcmalloc::pages(size) << kPageShift;
    }
  }

  // This just calls GetSizeWithCallback, but because that's in an
  // unnamed namespace, we need to move the definition below it in the
  // file.
  virtual size_t GetAllocatedSize(const void* ptr);

  // This duplicates some of the logic in GetSizeWithCallback, but is
  // faster.  This is important on OS X, where this function is called
  // on every allocation operation.
  virtual Ownership GetOwnership(const void* ptr) {
    const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
    // The rest of tcmalloc assumes that all allocated pointers use at
    // most kAddressBits bits.  If ptr doesn't, then it definitely
    // wasn't allocated by tcmalloc.
    if ((p >> (kAddressBits - kPageShift)) > 0) {
      return kNotOwned;
    }
    size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
    if (cl != 0) {
      return kOwned;
    }
    const Span *span = Static::pageheap()->GetDescriptor(p);
    return span ? kOwned : kNotOwned;
  }

  virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) {
    static const char* kCentralCacheType = "tcmalloc.central";
    static const char* kTransferCacheType = "tcmalloc.transfer";
    static const char* kThreadCacheType = "tcmalloc.thread";
    static const char* kPageHeapType = "tcmalloc.page";
    static const char* kPageHeapUnmappedType = "tcmalloc.page_unmapped";
    static const char* kLargeSpanType = "tcmalloc.large";
    static const char* kLargeUnmappedSpanType = "tcmalloc.large_unmapped";

    v->clear();

    // central class information
    int64 prev_class_size = 0;
    for (int cl = 1; cl < kNumClasses; ++cl) {
      size_t class_size = Static::sizemap()->ByteSizeForClass(cl);
      MallocExtension::FreeListInfo i;
      i.min_object_size = prev_class_size + 1;
      i.max_object_size = class_size;
      i.total_bytes_free =
          Static::central_cache()[cl].length() * class_size;
      i.type = kCentralCacheType;
      v->push_back(i);

      // transfer cache
      i.total_bytes_free =
          Static::central_cache()[cl].tc_length() * class_size;
      i.type = kTransferCacheType;
      v->push_back(i);

      prev_class_size = Static::sizemap()->ByteSizeForClass(cl);
    }

    // Add stats from per-thread heaps
    uint64_t class_count[kNumClasses];
    memset(class_count, 0, sizeof(class_count));
    {
      SpinLockHolder h(Static::pageheap_lock());
      uint64_t thread_bytes = 0;
      ThreadCache::GetThreadStats(&thread_bytes, class_count);
    }

    prev_class_size = 0;
    for (int cl = 1; cl < kNumClasses; ++cl) {
      MallocExtension::FreeListInfo i;
      i.min_object_size = prev_class_size + 1;
      i.max_object_size = Static::sizemap()->ByteSizeForClass(cl);
      i.total_bytes_free =
          class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
      i.type = kThreadCacheType;
      v->push_back(i);
    }

    // append page heap info
    PageHeap::SmallSpanStats small;
    PageHeap::LargeSpanStats large;
    {
      SpinLockHolder h(Static::pageheap_lock());
      Static::pageheap()->GetSmallSpanStats(&small);
      Static::pageheap()->GetLargeSpanStats(&large);
    }

    // large spans: mapped
    MallocExtension::FreeListInfo span_info;
    span_info.type = kLargeSpanType;
    span_info.max_object_size = (numeric_limits<size_t>::max)();
    span_info.min_object_size = kMaxPages << kPageShift;
    span_info.total_bytes_free = large.normal_pages << kPageShift;
    v->push_back(span_info);

    // large spans: unmapped
    span_info.type = kLargeUnmappedSpanType;
    span_info.total_bytes_free = large.returned_pages << kPageShift;
    v->push_back(span_info);

    // small spans
    for (int s = 1; s < kMaxPages; s++) {
      MallocExtension::FreeListInfo i;
      i.max_object_size = (s << kPageShift);
      i.min_object_size = ((s - 1) << kPageShift);

      i.type = kPageHeapType;
      i.total_bytes_free = (s << kPageShift) * small.normal_length[s];
      v->push_back(i);

      i.type = kPageHeapUnmappedType;
      i.total_bytes_free = (s << kPageShift) * small.returned_length[s];
      v->push_back(i);
    }
  }
};

// The constructor allocates an object to ensure that initialization
// runs before main(), and therefore we do not have a chance to become
// multi-threaded before initialization.  We also create the TSD key
// here.  Presumably by the time this constructor runs, glibc is in
// good enough shape to handle pthread_key_create().
//
// The constructor also takes the opportunity to tell STL to use
// tcmalloc.  We want to do this early, before constructor time, so
// all user STL allocations go through tcmalloc (which works really
// well for STL).
//
// The destructor prints stats when the program exits.
static int tcmallocguard_refcount = 0;  // no lock needed: runs before main()
TCMallocGuard::TCMallocGuard() {
  if (tcmallocguard_refcount++ == 0) {
#ifdef HAVE_TLS    // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    tcmalloc::CheckIfKernelSupportsTLS();
#endif
    ReplaceSystemAlloc();    // defined in libc_override_*.h
    tc_free(tc_malloc(1));
    ThreadCache::InitTSD();
    tc_free(tc_malloc(1));
    // Either we, or debugallocation.cc, or valgrind will control memory
    // management.  We register our extension if we're the winner.
#ifdef TCMALLOC_USING_DEBUGALLOCATION
    // Let debugallocation register its extension.
#else
    if (RunningOnValgrind()) {
      // Let Valgrind use its own malloc (so don't register our extension).
    } else {
      MallocExtension::Register(new TCMallocImplementation);
    }
#endif
  }
}

TCMallocGuard::~TCMallocGuard() {
  if (--tcmallocguard_refcount == 0) {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
  }
}
#ifndef WIN32_OVERRIDE_ALLOCATORS
static TCMallocGuard module_enter_exit_hook;
#endif

//-------------------------------------------------------------------
// Helpers for the exported routines below
//-------------------------------------------------------------------

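// Returns true if the pagemap cache has no entry for ptr's page, or if the
// cached size class matches the one recorded in the page's Span.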
static inline bool CheckCachedSizeClass(void *ptr) {
  PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p);
  return cached_value == 0 ||
      cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass;
}

static inline void* CheckMallocResult(void *result) {
  ASSERT(result == NULL || CheckCachedSizeClass(result));
  MarkAllocatedRegion(result);
  return result;
}

static inline void* SpanToMallocResult(Span *span) {
  Static::pageheap()->CacheSizeClass(span->start, 0);
  return
      CheckMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}

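// Allocates a span of at least "size" bytes directly from the page heap and
// attaches the current stack trace to it so the allocation is visible to the
// heap sampler.  Returns NULL if the page heap cannot satisfy the request.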
static void* DoSampledAllocation(size_t size) {
  // Grab the stack trace outside the heap lock
  StackTrace tmp;
  tmp.depth = GetStackTrace(tmp.stack, tcmalloc::kMaxStackDepth, 1);
  tmp.size = size;

  SpinLockHolder h(Static::pageheap_lock());
  // Allocate span
  Span *span = Static::pageheap()->New(tcmalloc::pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate stack trace
  StackTrace *stack = Static::stacktrace_allocator()->New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory
    return span;
  }
  *stack = tmp;
  span->sample = 1;
  span->objects = stack;
  tcmalloc::DLL_Prepend(Static::sampled_objects(), span);

  return SpanToMallocResult(span);
}

namespace {

// Copy of FLAGS_tcmalloc_large_alloc_report_threshold with
// automatic increases factored in.
static int64_t large_alloc_threshold =
  (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold
   ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold);

static void ReportLargeAlloc(Length num_pages, void* result) {
  StackTrace stack;
  stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1);

  static const int N = 1000;
  char buffer[N];
  TCMalloc_Printer printer(buffer, N);
  printer.printf("tcmalloc: large alloc %" PRIu64 " bytes == %p @ ",
                 static_cast<uint64>(num_pages) << kPageShift,
                 result);
  for (int i = 0; i < stack.depth; i++) {
    printer.printf(" %p", stack.stack[i]);
  }
  printer.printf("\n");
  write(STDERR_FILENO, buffer, strlen(buffer));
}

inline void* cpp_alloc(size_t size, bool nothrow);
inline void* do_malloc(size_t size);

// TODO(willchan): Investigate whether or not inlining this much is harmful to
// performance.
// This is equivalent to do_malloc() unless tc_new_mode is set, in which case
// it falls back to cpp_alloc(), which runs the std::new_handler (if one is
// installed) when the allocation fails.
inline void* do_malloc_or_cpp_alloc(size_t size) {
  return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size);
}

void* cpp_memalign(size_t align, size_t size);
void* do_memalign(size_t align, size_t size);

inline void* do_memalign_or_cpp_memalign(size_t align, size_t size) {
  return tc_new_mode ? cpp_memalign(align, size) : do_memalign(align, size);
}

// Must be called with the page lock held.
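// With the non-Windows default threshold of 1 GiB, reports are emitted at
// 1 GiB, then ~1.125 GiB, ~1.27 GiB, and so on, until the threshold reaches
// its 8 GiB cap.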
inline bool should_report_large(Length num_pages) {
  const int64 threshold = large_alloc_threshold;
  if (threshold > 0 && num_pages >= (threshold >> kPageShift)) {
    // Increase the threshold by 1/8 every time we generate a report.
    // We cap the threshold at 8GiB to avoid overflow problems.
    large_alloc_threshold = (threshold + threshold/8 < 8ll<<30
                             ? threshold + threshold/8 : 8ll<<30);
    return true;
  }
  return false;
}

// Helper for do_malloc().
inline void* do_malloc_pages(ThreadCache* heap, size_t size) {
  void* result;
  bool report_large;

  Length num_pages = tcmalloc::pages(size);
  size = num_pages << kPageShift;

  // Chromium profiling.  Measurements in March 2013 suggest this
  // imposes a small enough runtime cost that there's no reason to
  // try to optimize it.
  heap->AddToByteAllocatedTotal(size);

  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    result = DoSampledAllocation(size);

    SpinLockHolder h(Static::pageheap_lock());
    report_large = should_report_large(num_pages);
  } else {
    SpinLockHolder h(Static::pageheap_lock());
    Span* span = Static::pageheap()->New(num_pages);
    result = (span == NULL ? NULL : SpanToMallocResult(span));
    report_large = should_report_large(num_pages);
  }

  if (report_large) {
    ReportLargeAlloc(num_pages, result);
  }
  return result;
}

inline void* do_malloc(size_t size) {
  AddRoomForMark(&size);

  void* ret = NULL;

  // The following call forces module initialization
  ThreadCache* heap = ThreadCache::GetCache();
  if (size <= kMaxSize && IsAllocSizePermitted(size)) {
    size_t cl = Static::sizemap()->SizeClass(size);
    size = Static::sizemap()->class_to_size(cl);

    // Chromium profiling.  Measurements in March 2013 suggest this
    // imposes a small enough runtime cost that there's no reason to
    // try to optimize it.
    heap->AddToByteAllocatedTotal(size);

    if ((FLAGS_tcmalloc_sample_parameter > 0) &&
        heap->SampleAllocation(size)) {
      ret = DoSampledAllocation(size);
      MarkAllocatedRegion(ret);
    } else {
      // The common case, and also the simplest.  This just pops the
      // size-appropriate freelist, after replenishing it if it's empty.
      ret = CheckMallocResult(heap->Allocate(size, cl));
    }
  } else if (IsAllocSizePermitted(size)) {
    ret = do_malloc_pages(heap, size);
    MarkAllocatedRegion(ret);
  }
  if (ret == NULL) errno = ENOMEM;
  ASSERT(IsAllocSizePermitted(size) || ret == NULL);
  return ret;
}

inline void* do_calloc(size_t n, size_t elem_size) {
  // Overflow check
  const size_t size = n * elem_size;
  if (elem_size != 0 && size / elem_size != n) return NULL;

  void* result = do_malloc_or_cpp_alloc(size);
  if (result != NULL) {
    memset(result, 0, size);
  }
  return result;
}

static inline ThreadCache* GetCacheIfPresent() {
  void* const p = ThreadCache::GetCacheIfPresent();
  return reinterpret_cast<ThreadCache*>(p);
}

// This lets you call back to a given function pointer if ptr is invalid.
// It is used primarily by windows code which wants a specialized callback.
inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) {
  if (ptr == NULL) return;
  if (Static::pageheap() == NULL) {
    // We called free() before malloc().  This can occur if the
    // (system) malloc() is called before tcmalloc is loaded, and then
    // free() is called after tcmalloc is loaded (and tc_free has
    // replaced free), but before the global constructor has run that
    // sets up the tcmalloc data structures.
    (*invalid_free_fn)(ptr);  // Decide how to handle the bad free request
    return;
  }
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = NULL;
  size_t cl = Static::pageheap()->GetSizeClassIfCached(p);

  if (cl == 0) {
    span = Static::pageheap()->GetDescriptor(p);
    if (!span) {
      // span can be NULL because the pointer passed in is invalid
      // (not something returned by malloc or friends), or because the
      // pointer was allocated with some other allocator besides
      // tcmalloc.  The latter can happen if tcmalloc is linked in via
      // a dynamic library, but is not listed last on the link line.
      // In that case, libraries after it on the link line will
      // allocate with libc malloc, but free with tcmalloc's free.
      (*invalid_free_fn)(ptr);  // Decide how to handle the bad free request
      return;
    }
    cl = span->sizeclass;
    Static::pageheap()->CacheSizeClass(p, cl);
  }
  if (cl == 0) {
    // Check to see if the object is in use.
    CHECK_CONDITION_PRINT(span->location == Span::IN_USE,
                          "Object was not in-use");

    CHECK_CONDITION_PRINT(
        span->start << kPageShift == reinterpret_cast<uintptr_t>(ptr),
        "Pointer is not pointing to the start of a span");
  }
  ValidateAllocatedRegion(ptr, cl);

  if (cl != 0) {
    ASSERT(!Static::pageheap()->GetDescriptor(p)->sample);
    ThreadCache* heap = GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // Delete directly into central cache
      tcmalloc::FL_Init(ptr);
      Static::central_cache()[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    SpinLockHolder h(Static::pageheap_lock());
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span != NULL && span->start == p);
    if (span->sample) {
      StackTrace* st = reinterpret_cast<StackTrace*>(span->objects);
      tcmalloc::DLL_Remove(span);
      Static::stacktrace_allocator()->Delete(st);
      span->objects = NULL;
    }
    Static::pageheap()->Delete(span);
  }
}

// The default "do_free" that uses the default callback.
inline void do_free(void* ptr) {
  return do_free_with_callback(ptr, &InvalidFree);
}

// NOTE: some logic here is duplicated in GetOwnership (above), for
// speed.  If you change this function, look at that one too.
inline size_t GetSizeWithCallback(const void* ptr,
                                  size_t (*invalid_getsize_fn)(const void*)) {
  if (ptr == NULL)
    return 0;
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
  if (cl != 0) {
    return Static::sizemap()->ByteSizeForClass(cl);
  } else {
    const Span *span = Static::pageheap()->GetDescriptor(p);
    if (span == NULL) {  // means we do not own this memory
      return (*invalid_getsize_fn)(ptr);
    } else if (span->sizeclass != 0) {
      Static::pageheap()->CacheSizeClass(p, span->sizeclass);
      return Static::sizemap()->ByteSizeForClass(span->sizeclass);
    } else {
      return span->length << kPageShift;
    }
  }
}

// This lets you call back to a given function pointer if ptr is invalid.
// It is used primarily by windows code which wants a specialized callback.
inline void* do_realloc_with_callback(
    void* old_ptr, size_t new_size,
    void (*invalid_free_fn)(void*),
    size_t (*invalid_get_size_fn)(const void*)) {
  AddRoomForMark(&new_size);
  // Get the size of the old entry
  const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn);

  // Reallocate if the new size is larger than the old size,
  // or if the new size is significantly smaller than the old size.
  // We do hysteresis to avoid resizing ping-pongs:
  //    . If we need to grow, grow to max(new_size, old_size * 1.X)
  //    . Don't shrink unless new_size < old_size * 0.Y
  // X and Y trade-off time for wasted space.  For now we do 1.25 and 0.5.
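  // For example, with old_size == 1000: a request to grow to 1100 first tries
  // to allocate 1250 bytes (old_size * 1.25), while a shrink request is only
  // honored once new_size drops below 500 (old_size * 0.5); otherwise the old
  // block is returned unchanged.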
  const size_t min_growth = min(old_size / 4,
      (std::numeric_limits<size_t>::max)() - old_size);  // Avoid overflow.
  const size_t lower_bound_to_grow = old_size + min_growth;
  const size_t upper_bound_to_shrink = old_size / 2;
  if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) {
    // Need to reallocate.
    void* new_ptr = NULL;

    if (new_size > old_size && new_size < lower_bound_to_grow) {
      new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow);
    }
    ExcludeMarkFromSize(&new_size);  // do_malloc will add space if needed.
    if (new_ptr == NULL) {
      // Either new_size is not a tiny increment, or last do_malloc failed.
      new_ptr = do_malloc_or_cpp_alloc(new_size);
    }
    if (new_ptr == NULL) {
      return NULL;
    }
    MallocHook::InvokeNewHook(new_ptr, new_size);
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
    MallocHook::InvokeDeleteHook(old_ptr);
    // We could use a variant of do_free() that leverages the fact
    // that we already know the sizeclass of old_ptr.  The benefit
    // would be small, so don't bother.
    do_free_with_callback(old_ptr, invalid_free_fn);
    return new_ptr;
  } else {
    // We still need to call hooks to report the updated size:
    MallocHook::InvokeDeleteHook(old_ptr);
    ExcludeMarkFromSize(&new_size);
    MallocHook::InvokeNewHook(old_ptr, new_size);
    return old_ptr;
  }
}

inline void* do_realloc(void* old_ptr, size_t new_size) {
  return do_realloc_with_callback(old_ptr, new_size,
                                  &InvalidFree, &InvalidGetSizeForRealloc);
}

// For use by exported routines below that want specific alignments
//
// Note: this code can be slow for alignments > 16, and can
// significantly fragment memory.  The expectation is that
// memalign/posix_memalign/valloc/pvalloc will not be invoked very
// often.  This requirement simplifies our implementation and allows
// us to tune for expected allocation patterns.
void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > 0);
  // Marked in CheckMallocResult(), which is also inside SpanToMallocResult().
  AddRoomForMark(&size);
  if (size + align < size) return NULL;         // Overflow

  // Fall back to malloc if we would already align this memory access properly.
  if (align <= AlignmentForSize(size)) {
    void* p = do_malloc(size);
    ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0);
    return p;
  }

  if (Static::pageheap() == NULL) ThreadCache::InitModule();

  // Allocate at least one byte to avoid boundary conditions below
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search through acceptable size classes looking for one with
    // enough alignment.  This depends on the fact that
    // InitSizeClasses() currently produces several size classes that
    // are aligned at powers of two.  We will waste time and space if
    // we miss in the size class array, but that is deemed acceptable
    // since memalign() should be used rarely.
    int cl = Static::sizemap()->SizeClass(size);
    while (cl < kNumClasses &&
           ((Static::sizemap()->class_to_size(cl) & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      ThreadCache* heap = ThreadCache::GetCache();
      size = Static::sizemap()->class_to_size(cl);
      return CheckMallocResult(heap->Allocate(size, cl));
    }
  }

  // We will allocate directly from the page heap
  SpinLockHolder h(Static::pageheap_lock());

  if (align <= kPageSize) {
    // Any page-level allocation will be fine
    // TODO: We could put the rest of this page in the appropriate
    // TODO: cache but it does not seem worth it.
    Span* span = Static::pageheap()->New(tcmalloc::pages(size));
    return span == NULL ? NULL : SpanToMallocResult(span);
  }

  // Allocate extra pages and carve off an aligned portion
  const Length alloc = tcmalloc::pages(size + align);
  Span* span = Static::pageheap()->New(alloc);
  if (span == NULL) return NULL;

  // Skip starting portion so that we end up aligned
  Length skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = Static::pageheap()->Split(span, skip);
    Static::pageheap()->Delete(span);
    span = rest;
  }

  // Skip trailing portion that we do not need to return
  const Length needed = tcmalloc::pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = Static::pageheap()->Split(span, needed);
    Static::pageheap()->Delete(trailer);
  }
  return SpanToMallocResult(span);
}

// Helpers for use by exported routines below:

inline void do_malloc_stats() {
  PrintStats(1);
}

inline int do_mallopt(int cmd, int value) {
  return 1;     // Indicates error
}

#ifdef HAVE_STRUCT_MALLINFO
inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL, NULL, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" fields, so some of the
  // size values will be truncated.
  info.arena     = static_cast<int>(stats.pageheap.system_bytes);
  info.fsmblks   = static_cast<int>(stats.thread_bytes
                                    + stats.central_bytes
                                    + stats.transfer_bytes);
  info.fordblks  = static_cast<int>(stats.pageheap.free_bytes +
                                    stats.pageheap.unmapped_bytes);
  info.uordblks  = static_cast<int>(stats.pageheap.system_bytes
                                    - stats.thread_bytes
                                    - stats.central_bytes
                                    - stats.transfer_bytes
                                    - stats.pageheap.free_bytes
                                    - stats.pageheap.unmapped_bytes);

  return info;
}
#endif  // HAVE_STRUCT_MALLINFO
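
// Illustrative sketch (not compiled): how a caller might read the subset of
// mallinfo fields populated above.  mallinfo() is the glibc entry point that
// tc_mallinfo() stands in for when tcmalloc is linked; the printout is
// hypothetical.
#if 0
#include <malloc.h>
#include <stdio.h>

static void PrintMallinfoSketch() {
  struct mallinfo mi = mallinfo();
  // arena:    bytes obtained from the system (pageheap system_bytes)
  // fsmblks:  bytes sitting in thread, central and transfer caches
  // fordblks: free + unmapped bytes in the page heap
  // uordblks: bytes currently handed out to the application
  printf("arena=%d in-use=%d free=%d caches=%d\n",
         mi.arena, mi.uordblks, mi.fordblks, mi.fsmblks);
}
#endif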

static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED);

inline void* cpp_alloc(size_t size, bool nothrow) {
  for (;;) {
    void* p = do_malloc(size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }
      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // If it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}
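
// Illustrative sketch (not compiled): the retry loop above mirrors how
// operator new is specified to behave.  A new-handler installed by the
// application can release memory and return, in which case cpp_alloc()
// retries; if it throws std::bad_alloc (or no handler is installed), the
// failure is reported according to `nothrow`.  The handler below is a
// hypothetical application-side example, not part of tcmalloc.
#if 0
#include <new>
#include <cstdlib>

static char* g_reserve = static_cast<char*>(std::malloc(1 << 20));  // emergency pool

static void ExampleNewHandler() {
  if (g_reserve != NULL) {
    std::free(g_reserve);        // release the reserve once...
    g_reserve = NULL;
    return;                      // ...returning makes cpp_alloc() loop and retry
  }
  throw std::bad_alloc();        // nothing left to release: fail the allocation
}

// Installed by the application with: std::set_new_handler(&ExampleNewHandler);
#endif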

void* cpp_memalign(size_t align, size_t size) {
  for (;;) {
    void* p = do_memalign(align, size);
#ifdef PREANSINEW
    return p;
#else
    if (p == NULL) {  // allocation failed
      // Get the current new handler.  NB: this function is not
      // thread-safe.  We make a feeble stab at making it so here, but
      // this lock only protects against tcmalloc interfering with
      // itself, not with other libraries calling set_new_handler.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }
#if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
      if (nh) {
        // Since exceptions are disabled, we don't really know if new_handler
        // failed.  Assume it will abort if it fails.
        (*nh)();
        continue;
      }
      return 0;
#else
      // If no new_handler is established, the allocation failed.
      if (!nh)
        return 0;

      // Otherwise, try the new_handler.  If it returns, retry the
      // allocation.  If it throws std::bad_alloc, fail the allocation.
      // If it throws something else, don't interfere.
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        return p;
      }
#endif  // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS)
    } else {  // allocation success
      return p;
    }
#endif  // PREANSINEW
  }
}

}  // end unnamed namespace

// As promised, the definition of this function, declared above.
size_t TCMallocImplementation::GetAllocatedSize(const void* ptr) {
  // Chromium workaround for third-party code calling tc_malloc_size(NULL), see
  // http://code.google.com/p/chromium/issues/detail?id=118087
  // Note: this is consistent with GLIBC's implementation of
  // malloc_usable_size(NULL).
  if (ptr == NULL)
    return 0;
  ASSERT(TCMallocImplementation::GetOwnership(ptr)
         != TCMallocImplementation::kNotOwned);
  return ExcludeSpaceForMark(
      GetSizeWithCallback(ptr, &InvalidGetAllocatedSize));
}
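
// Illustrative sketch (not compiled): GetAllocatedSize() reports the usable
// size of a block, which may exceed the requested size because requests are
// rounded up to a size class.  The "100 bytes" request is an arbitrary example.
#if 0
#include <assert.h>
#include <stdlib.h>

static void AllocatedSizeSketch() {
  void* p = malloc(100);                                   // rounded up to some size class
  size_t usable = MallocExtension::instance()->GetAllocatedSize(p);
  assert(usable >= 100);                                   // never smaller than the request
  assert(MallocExtension::instance()->GetAllocatedSize(NULL) == 0);  // NULL is tolerated, per above
  free(p);
}
#endif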

void TCMallocImplementation::MarkThreadBusy() {
  // Allocate to force the creation of a thread cache, but avoid
  // invoking any hooks.
  do_free(do_malloc(0));
}

//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------

extern "C" PERFTOOLS_DLL_DECL const char* tc_version(
    int* major, int* minor, const char** patch) __THROW {
  if (major) *major = TC_VERSION_MAJOR;
  if (minor) *minor = TC_VERSION_MINOR;
  if (patch) *patch = TC_VERSION_PATCH;
  return TC_VERSION_STRING;
}

// This function behaves similarly to MSVC's _set_new_mode.
// If flag is 0 (default), calls to malloc will behave normally.
// If flag is 1, calls to malloc will behave like calls to new,
// and the std_new_handler will be invoked on failure.
// Returns the previous mode.
extern "C" PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) __THROW {
  int old_mode = tc_new_mode;
  tc_new_mode = flag;
  return old_mode;
}
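
// Illustrative sketch (not compiled): with tc_new_mode set to 1, a failed
// malloc() goes through the same new-handler/throw path as operator new
// instead of simply returning NULL.  Typical use is a one-time switch at
// startup; the call sequence below is hypothetical.
#if 0
static void NewModeSketch() {
  int old_mode = tc_set_new_mode(1);   // malloc failures now behave like new failures
  // ... run with the new mode ...
  tc_set_new_mode(old_mode);           // restore the previous behavior
}
#endif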

#ifndef TCMALLOC_USING_DEBUGALLOCATION  // debugallocation.cc defines its own

// CAVEAT: The code structure below ensures that MallocHook methods are always
//         called from the stack frame of the invoked allocation function.
//         heap-checker.cc depends on this to start a stack trace from
//         the call to the (de)allocation function.

extern "C" PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) __THROW {
  void* result = do_malloc_or_cpp_alloc(size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void tc_free(void* ptr) __THROW {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_calloc(size_t n,
                                              size_t elem_size) __THROW {
  void* result = do_calloc(n, elem_size);
  MallocHook::InvokeNewHook(result, n * elem_size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) __THROW {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* old_ptr,
                                               size_t new_size) __THROW {
  if (old_ptr == NULL) {
    void* result = do_malloc_or_cpp_alloc(new_size);
    MallocHook::InvokeNewHook(result, new_size);
    return result;
  }
  if (new_size == 0) {
    MallocHook::InvokeDeleteHook(old_ptr);
    do_free(old_ptr);
    return NULL;
  }
  return do_realloc(old_ptr, new_size);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_new(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

extern "C" PERFTOOLS_DLL_DECL void tc_delete(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

// Standard C++ library implementations define and use this
// (via ::operator delete(ptr, nothrow)).
// But it's really the same as normal delete, so we just do the same thing.
extern "C" PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) {
  void* p = cpp_alloc(size, false);
  // We keep this next instruction out of cpp_alloc for a reason: when
  // it's in, and new just calls cpp_alloc, the optimizer may fold the
  // new call into cpp_alloc, which messes up our whole section-based
  // stacktracing (see ATTRIBUTE_SECTION, above).  This ensures cpp_alloc
  // isn't the last thing this fn calls, and prevents the folding.
  MallocHook::InvokeNewHook(p, size);
  return p;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size, const std::nothrow_t&)
    __THROW {
  void* p = cpp_alloc(size, true);
  MallocHook::InvokeNewHook(p, size);
  return p;
}

extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p, const std::nothrow_t&) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}

extern "C" PERFTOOLS_DLL_DECL void* tc_memalign(size_t align,
                                                size_t size) __THROW {
  void* result = do_memalign_or_cpp_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(
    void** result_ptr, size_t align, size_t size) __THROW {
  if (((align % sizeof(void*)) != 0) ||
      ((align & (align - 1)) != 0) ||
      (align == 0)) {
    return EINVAL;
  }

  void* result = do_memalign_or_cpp_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL) {
    return ENOMEM;
  } else {
    *result_ptr = result;
    return 0;
  }
}
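
// Illustrative sketch (not compiled): the EINVAL checks above implement the
// POSIX contract -- the alignment must be a nonzero power of two and a
// multiple of sizeof(void*).  The calls below are hypothetical examples.
#if 0
#include <errno.h>
#include <stdlib.h>

static void PosixMemalignSketch() {
  void* p = NULL;
  int rc;
  rc = posix_memalign(&p, 64, 1000);   // OK: power of two, multiple of sizeof(void*)
  if (rc == 0) free(p);
  rc = posix_memalign(&p, 3, 1000);    // EINVAL: not a power of two
  rc = posix_memalign(&p, 2, 1000);    // EINVAL: not a multiple of sizeof(void*)
  rc = posix_memalign(&p, 0, 1000);    // EINVAL: zero alignment
  (void)rc;
}
#endif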

static size_t pagesize = 0;

extern "C" PERFTOOLS_DLL_DECL void* tc_valloc(size_t size) __THROW {
  // Allocate page-aligned object of length >= size bytes
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign_or_cpp_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}

extern "C" PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t size) __THROW {
  // Round up size to a multiple of pagesize
  if (pagesize == 0) pagesize = getpagesize();
  if (size == 0) {     // pvalloc(0) should allocate one page, according to
    size = pagesize;   // http://man.free4web.biz/man3/libmpatrol.3.html
  }
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign_or_cpp_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
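
// Illustrative sketch (not compiled): the rounding expression above,
// (size + pagesize - 1) & ~(pagesize - 1), rounds up to the next multiple of
// the page size, which must be a power of two.  Worked example assuming
// 4096-byte pages:
#if 0
//   size = 1     -> (1 + 4095)    & ~4095 = 4096   (one page)
//   size = 4096  -> (4096 + 4095) & ~4095 = 4096   (already a multiple)
//   size = 4097  -> (4097 + 4095) & ~4095 = 8192   (two pages)
static size_t RoundUpToPageSketch(size_t size, size_t pagesize) {
  return (size + pagesize - 1) & ~(pagesize - 1);
}
#endif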

extern "C" PERFTOOLS_DLL_DECL void tc_malloc_stats(void) __THROW {
  do_malloc_stats();
}

extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) __THROW {
  return do_mallopt(cmd, value);
}

#ifdef HAVE_STRUCT_MALLINFO
extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW {
  return do_mallinfo();
}
#endif

extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW {
  return MallocExtension::instance()->GetAllocatedSize(ptr);
}

#if defined(OS_LINUX)
extern "C" void* PERFTOOLS_DLL_DECL tc_malloc_skip_new_handler(size_t size) {
  void* result = do_malloc(size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
#endif

#endif  // TCMALLOC_USING_DEBUGALLOCATION

#if defined(OS_LINUX)
// Alias the weak symbol in Chromium to our implementation.
extern "C" __attribute__((visibility("default"), alias("tc_malloc_skip_new_handler")))
void* tc_malloc_skip_new_handler_weak(size_t size);
#endif

// --- Validation implementation with an extra mark ----------------------------
// We will put a mark at the extreme end of each allocation block.  We make
// sure that we always allocate enough "extra memory" that we can fit in the
// mark, and still provide the requested usable region.  If ever that mark is
// not as expected, then we know that the user is corrupting memory beyond their
// request size, or that they have called free a second time without having
// the memory allocated (again).  This allows us to spot most double free()s,
// but some can "slip by" or confuse our logic if the caller reallocates memory
// (for a second use) before performing an evil double-free of a first
// allocation.

// This code can be optimized, but for now, it is written to be most easily
// understood, and flexible (since it is evolving a bit). Potential
// optimizations include using other calculated data, such as class size, or
// allocation size, which is known in the code above, but then is recalculated
// below.  Another potential optimization would be careful manual inlining of
// code, but I *think* that the compiler will probably do this for me, and I've
// been careful to avoid aliasing issues that might make a compiler back-off.

// Evolution includes experimenting with different marks, to minimize the chance
// that a mark would be misunderstood (missed corruption).  The marks are meant
// to be a hashed encoding of the location, so that they can't be copied over a
// different region (by accident) without being detected (most of the time).
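
// Illustrative sketch (not compiled): layout of a marked allocation.  For a
// request of n bytes, AddRoomForMark() grows the request by sizeof(MarkType)
// so the block tcmalloc picks has room for the mark at its very end, while
// ExcludeSpaceForMark()/ExcludeMarkFromSize() hide that extra room from the
// caller.  The 32-byte size class and 25-byte request below are hypothetical.
#if 0
//   caller asks for 25 bytes; AddRoomForMark: 25 + 4 = 29 -> 32-byte class
//
//   |<----------------- 32-byte class ----------------->|
//   | usable region reported to caller (28 bytes) | mark |
//   ^ptr                                          ^GetMarkLocation(ptr)
static void MarkLayoutSketch() {
  size_t requested = 25;
  size_t size = requested + sizeof(int);          // AddRoomForMark (word marking)
  size_t class_size = 32;                         // hypothetical size class for 29 bytes
  size_t usable_reported = class_size - sizeof(int);  // ExcludeSpaceForMark -> 28
  // usable_reported >= requested, and the mark never overlaps the
  // caller-visible region.
  (void)size; (void)usable_reported;
}
#endif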

// Enable the following define to turn on all the TCMalloc checking.
// It will cost about 2% in performance, but it will catch double frees (most of
// the time), and will often catch allocated-buffer overrun errors.  This
// validation is only active when TCMalloc is used as the allocator.
#ifndef NDEBUG
#define TCMALLOC_VALIDATION
#endif

#if !defined(TCMALLOC_VALIDATION)

static size_t ExcludeSpaceForMark(size_t size) { return size; }
static void AddRoomForMark(size_t* size) {}
static void ExcludeMarkFromSize(size_t* new_size) {}
static void MarkAllocatedRegion(void* ptr) {}
static void ValidateAllocatedRegion(void* ptr, size_t cl) {}

#else  // TCMALLOC_VALIDATION

static void DieFromDoubleFree() {
  Log(kCrash, __FILE__, __LINE__, "Attempt to double free");
}

static void DieFromMemoryCorruption() {
  Log(kCrash, __FILE__, __LINE__, "Memory corrupted");
}

// We can either do byte marking, or whole word marking based on the following
// define.  char is as small as we can get, and word marking probably provides
// more than enough bits that we won't miss a corruption. Any sized integral
// type can be used, but we just define two examples.

//  #define TCMALLOC_SMALL_VALIDATION
#if defined (TCMALLOC_SMALL_VALIDATION)

typedef char MarkType;  // char saves memory... int is more complete.
static const MarkType kAllocationMarkMask = static_cast<MarkType>(0x36);

#else

typedef int MarkType;  // char saves memory... int is more complete.
static const MarkType kAllocationMarkMask = static_cast<MarkType>(0xE1AB9536);

#endif

// TODO(jar): See if use of reference rather than pointer gets better inlining,
// or if a macro is needed.  My fear is that taking the address may preclude
// register allocation :-(.
inline static void AddRoomForMark(size_t* size) {
  *size += sizeof(kAllocationMarkMask);
}

inline static void ExcludeMarkFromSize(size_t* new_size) {
  *new_size -= sizeof(kAllocationMarkMask);
}

inline static size_t ExcludeSpaceForMark(size_t size) {
  return size - sizeof(kAllocationMarkMask);  // Lie about size when asked.
}

inline static MarkType* GetMarkLocation(void* ptr) {
  size_t size = GetSizeWithCallback(ptr, &InvalidGetAllocatedSize);
  ASSERT(size % sizeof(kAllocationMarkMask) == 0);
  size_t last_index = (size / sizeof(kAllocationMarkMask)) - 1;
  return static_cast<MarkType*>(ptr) + last_index;
}
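
// Illustrative sketch (not compiled): GetMarkLocation() places the mark in the
// last sizeof(MarkType) bytes of the block.  The 64-byte block size below is a
// hypothetical stand-in for what GetSizeWithCallback() would report.
#if 0
//   size = 64, sizeof(MarkType) = 4
//   last_index   = 64 / 4 - 1 = 15
//   mark address = ptr + 15 * 4 = ptr + 60   (bytes 60..63 of the block)
static int* MarkLocationSketch(void* ptr) {
  size_t size = 64;                               // stands in for GetSizeWithCallback()
  size_t last_index = (size / sizeof(int)) - 1;   // 15
  return static_cast<int*>(ptr) + last_index;     // points at the final word
}
#endif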

// We hash in the mark location plus the pointer so that we effectively mix in
// the size of the block.  This means that if a span is used for different sizes,
// the mark will be different.  It would be good to hash in the size (which
// we effectively get by using both mark location and pointer), but even better
// would be to also include the class, as it concisely contains the entropy
// found in the size (when we don't have a large allocation), and there is less
// risk of losing those bits to truncation. It would probably be good to combine
// the high bits of size (capturing info about large blocks) with the class
// (which is a 6 bit number).
inline static MarkType GetMarkValue(void* ptr, MarkType* mark) {
  void* ptr2 = static_cast<void*>(mark);
  size_t offset1 = static_cast<char*>(ptr) - static_cast<char*>(NULL);
  size_t offset2 = static_cast<char*>(ptr2) - static_cast<char*>(NULL);
  static const int kInvariantBits = 2;
  ASSERT((offset1 >> kInvariantBits) << kInvariantBits == offset1);
  // Note: low bits of both offsets are invariants due to alignment.  High bits
  // of both offsets are the same (unless we have a large allocation).  Avoid
  // XORing high bits together, as they will cancel for most small allocations.

  MarkType ret = kAllocationMarkMask;
  // Using a little shift, we can safely XOR together both offsets.
  ret ^= static_cast<MarkType>(offset1 >> kInvariantBits) ^
         static_cast<MarkType>(offset2);
  if (sizeof(ret) == 1) {
    // Try to bring some high level bits into the mix.
    ret += static_cast<MarkType>(offset1 >> 8) ^
           static_cast<MarkType>(offset1 >> 16) ^
           static_cast<MarkType>(offset1 >> 24);
  }
  // Hash in high bits on a 64 bit architecture.
  if (sizeof(size_t) == 8 && sizeof(ret) == 4)
    ret += offset1 >> 16;
  if (ret == 0)
    ret = kAllocationMarkMask;  // Avoid common pattern of all zeros.
  return ret;
}
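
// Illustrative sketch (not compiled): a stand-alone version of the mixing done
// by GetMarkValue(), using plain uintptr_t casts instead of pointer-difference
// tricks.  It only demonstrates why both the object address and the mark
// address feed the hash: together they encode the block size, so a mark copied
// from a block of a different size should not validate.  Names and structure
// here are hypothetical simplifications, not the exact function above.
#if 0
#include <stdint.h>

static int MarkValueSketch(void* ptr, int* mark) {
  const int kMask = static_cast<int>(0xE1AB9536);      // same role as kAllocationMarkMask
  uintptr_t o1 = reinterpret_cast<uintptr_t>(ptr);     // object start
  uintptr_t o2 = reinterpret_cast<uintptr_t>(mark);    // end-of-object mark slot
  int ret = kMask ^ static_cast<int>(o1 >> 2) ^ static_cast<int>(o2);
  if (sizeof(uintptr_t) == 8) ret += static_cast<int>(o1 >> 16);  // fold in high bits
  if (ret == 0) ret = kMask;                           // never use the all-zero pattern
  return ret;
}
#endif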

// TODO(jar): Use the passed in TCMalloc Class Index to calculate the mark
// location faster.  The current implementation calls general functions, which
// have to recalculate this in order to get the Class Size.  This is a slow and
// wasteful recomputation... but it is much more readable this way (for now).
static void ValidateAllocatedRegion(void* ptr, size_t cl) {
  if (ptr == NULL) return;
  MarkType* mark = GetMarkLocation(ptr);
  MarkType allocated_mark = GetMarkValue(ptr, mark);
  MarkType current_mark = *mark;

  if (current_mark == ~allocated_mark)
    DieFromDoubleFree();
  if (current_mark != allocated_mark)
    DieFromMemoryCorruption();
#ifndef NDEBUG
  // In debug mode, fill the entire freed region with a marker byte.
  size_t class_size = static_cast<size_t>(reinterpret_cast<char*>(mark) -
                                          reinterpret_cast<char*>(ptr));
  memset(ptr, static_cast<char>(0x36), class_size);
#endif
  *mark = ~allocated_mark;  // Distinctively not allocated.
}

static void MarkAllocatedRegion(void* ptr) {
  if (ptr == NULL) return;
  MarkType* mark = GetMarkLocation(ptr);
  *mark = GetMarkValue(ptr, mark);
}
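
// Illustrative sketch (not compiled): lifecycle of the mark as seen from the
// application.  The 40-byte request is arbitrary; the deliberate double free
// exists only to show which check fires.
#if 0
#include <stdlib.h>

static void DoubleFreeDetectionSketch() {
  void* p = malloc(40);      // MarkAllocatedRegion() stamps the trailing word
  free(p);                   // ValidateAllocatedRegion() sees the expected mark,
                             // wipes the body (debug builds) and stores ~mark
  free(p);                   // stored value now equals ~allocated_mark,
                             // so DieFromDoubleFree() crashes with a clear message
  // Writing past the usable size instead would leave a value matching neither
  // pattern and trigger DieFromMemoryCorruption() on the next free().
}
#endif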

#endif  // TCMALLOC_VALIDATION
