memory_mac.mm revision 4e180b6a0b4720a9b8e9e959a882386f690f08ff
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/process/memory.h"

#include <CoreFoundation/CoreFoundation.h>
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <malloc/malloc.h>
#import <objc/runtime.h>

#include <new>

#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/mac/mac_util.h"
#include "base/scoped_clear_errno.h"
#include "third_party/apple_apsl/CFBase.h"
#include "third_party/apple_apsl/malloc.h"

#if ARCH_CPU_32_BITS
#include <dlfcn.h>
#include <mach-o/nlist.h>

#include "base/threading/thread_local.h"
#include "third_party/mach_override/mach_override.h"
#endif  // ARCH_CPU_32_BITS

namespace base {

// These are helpers for EnableTerminationOnHeapCorruption, which is a no-op
// on 64-bit Macs.
#if ARCH_CPU_32_BITS
namespace {

// Finds the library path for malloc() and thus the libC part of libSystem,
// which in Lion is in a separate image.
const char* LookUpLibCPath() {
  const void* addr = reinterpret_cast<const void*>(&malloc);

  Dl_info info;
  if (dladdr(addr, &info))
    return info.dli_fname;

  DLOG(WARNING) << "Could not find image path for malloc()";
  return NULL;
}

typedef void (*malloc_error_break_t)(void);
malloc_error_break_t g_original_malloc_error_break = NULL;

// Returns the function pointer for malloc_error_break. This symbol is declared
// as __private_extern__ and cannot be dlsym()ed. Instead, use nlist() to
// get it.
malloc_error_break_t LookUpMallocErrorBreak() {
  const char* lib_c_path = LookUpLibCPath();
  if (!lib_c_path)
    return NULL;

  // Only need to look up two symbols, but nlist() requires a NULL-terminated
  // array and takes no count.
  struct nlist nl[3];
  bzero(&nl, sizeof(nl));

  // The symbol to find.
  nl[0].n_un.n_name = const_cast<char*>("_malloc_error_break");

  // A reference symbol by which the address of the desired symbol will be
  // calculated.
  nl[1].n_un.n_name = const_cast<char*>("_malloc");

  int rv = nlist(lib_c_path, nl);
  if (rv != 0 || nl[0].n_type == N_UNDF || nl[1].n_type == N_UNDF) {
    return NULL;
  }

  // nlist() returns addresses as offsets in the image, not the instruction
  // pointer in memory. Use the known in-memory address of malloc()
  // to compute the offset for malloc_error_break().
  uintptr_t reference_addr = reinterpret_cast<uintptr_t>(&malloc);
  reference_addr -= nl[1].n_value;
  reference_addr += nl[0].n_value;

  return reinterpret_cast<malloc_error_break_t>(reference_addr);
}
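
// Worked example of the address arithmetic above, with made-up numbers: if
// nlist() reports _malloc at image offset 0x1000 and _malloc_error_break at
// image offset 0x1400, and &malloc is 0x90001000 at runtime, then the image
// slide is 0x90001000 - 0x1000 = 0x90000000, so malloc_error_break() lives
// at 0x90000000 + 0x1400 = 0x90001400.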

// Combines ThreadLocalBoolean with AutoReset.  It would be convenient
// to compose ThreadLocalPointer<bool> with base::AutoReset<bool>, but that
// would require allocating some storage for the bool.
class ThreadLocalBooleanAutoReset {
 public:
  ThreadLocalBooleanAutoReset(ThreadLocalBoolean* tlb, bool new_value)
      : scoped_tlb_(tlb),
        original_value_(tlb->Get()) {
    scoped_tlb_->Set(new_value);
  }
  ~ThreadLocalBooleanAutoReset() {
    scoped_tlb_->Set(original_value_);
  }

 private:
  ThreadLocalBoolean* scoped_tlb_;
  bool original_value_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalBooleanAutoReset);
};
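
// Usage sketch (this mirrors UncheckedMalloc() below): set the flag for the
// duration of a scope and restore the previous value on exit:
//   ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);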

base::LazyInstance<ThreadLocalBoolean>::Leaky
    g_unchecked_alloc = LAZY_INSTANCE_INITIALIZER;

// NOTE(shess): This is called when the malloc library noticed that the heap
// is fubar.  Avoid calls which will re-enter the malloc library.
void CrMallocErrorBreak() {
  g_original_malloc_error_break();

  // Out of memory is certainly not heap corruption, and not necessarily
  // something for which the process should be terminated. Leave that decision
  // to the OOM killer.  The EBADF case comes up because the malloc library
  // attempts to log to ASL (syslog) before calling this code, which fails
  // accessing a Unix-domain socket because of sandboxing.
  if (errno == ENOMEM || (errno == EBADF && g_unchecked_alloc.Get().Get()))
    return;

  // A unit test checks this error message, so it needs to be in release builds.
  char buf[1024] =
      "Terminating process due to a potential for future heap corruption: "
      "errno=";
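  // Render errno as exactly three decimal digits by hand rather than with a
  // printf-style call, avoiding any risk of re-entering the malloc library
  // (see the NOTE above).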
  char errnobuf[] = {
    '0' + ((errno / 100) % 10),
    '0' + ((errno / 10) % 10),
    '0' + (errno % 10),
    '\000'
  };
  COMPILE_ASSERT(ELAST <= 999, errno_too_large_to_encode);
  strlcat(buf, errnobuf, sizeof(buf));
  RAW_LOG(ERROR, buf);

  // Crash by writing to NULL+errno to allow analyzing errno from
  // crash dump info (setting a breakpad key would re-enter the malloc
  // library).  Max documented errno in intro(2) is actually 102, but
  // it really just needs to be "small" to stay on the right vm page.
  const int kMaxErrno = 256;
  char* volatile death_ptr = NULL;
  death_ptr += std::min(errno, kMaxErrno);
  *death_ptr = '!';
}

}  // namespace
#endif  // ARCH_CPU_32_BITS

void EnableTerminationOnHeapCorruption() {
#if defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
  // AddressSanitizer handles heap corruption, and on 64-bit Macs, the malloc
  // system automatically abort()s on heap corruption.
  return;
#else
  // Only override once; otherwise CrMallocErrorBreak() would recurse
  // into itself.
  if (g_original_malloc_error_break)
    return;

  malloc_error_break_t malloc_error_break = LookUpMallocErrorBreak();
  if (!malloc_error_break) {
    DLOG(WARNING) << "Could not find malloc_error_break";
    return;
  }

  mach_error_t err = mach_override_ptr(
      (void*)malloc_error_break,
      (void*)&CrMallocErrorBreak,
      (void**)&g_original_malloc_error_break);

  if (err != err_none)
    DLOG(WARNING) << "Could not override malloc_error_break; error = " << err;
#endif  // defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
}
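
// Usage sketch (hypothetical call site; the actual setup lives elsewhere in
// Chromium): call once during early process startup.
//   base::EnableTerminationOnHeapCorruption();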

// ------------------------------------------------------------------------

namespace {

bool g_oom_killer_enabled;

// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if necessary,
// and returns (in the out parameters) a region of memory (if any) to be
// re-protected when modifications are complete. This approach assumes that
// there is no contention for the protection of this memory.
void DeprotectMallocZone(ChromeMallocZone* default_zone,
                         mach_vm_address_t* reprotection_start,
                         mach_vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  kern_return_t result =
      mach_vm_region(mach_task_self(),
                     reprotection_start,
                     reprotection_length,
                     VM_REGION_BASIC_INFO_64,
                     reinterpret_cast<vm_region_info_t>(&info),
                     &count,
                     &unused);
  CHECK(result == KERN_SUCCESS);

  result = mach_port_deallocate(mach_task_self(), unused);
  CHECK(result == KERN_SUCCESS);
  // Does the region fully enclose the zone pointers? This is a possibly
  // unwarranted simplification: the size of a full version 8 malloc zone is
  // used rather than the actual, smaller size if the passed-in zone is not
  // version 8.
  CHECK(*reprotection_start <=
            reinterpret_cast<mach_vm_address_t>(default_zone));
  mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
      reinterpret_cast<mach_vm_size_t>(*reprotection_start);
  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    *reprotection_value = info.protection;
    result = mach_vm_protect(mach_task_self(),
                             *reprotection_start,
                             *reprotection_length,
                             false,
                             info.protection | VM_PROT_WRITE);
    CHECK(result == KERN_SUCCESS);
  }
}
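
// Expected call pattern (a sketch of what EnableTerminationOnOutOfMemory()
// below actually does):
//   mach_vm_address_t start = 0;
//   mach_vm_size_t length = 0;
//   vm_prot_t prot = VM_PROT_NONE;
//   DeprotectMallocZone(zone, &start, &length, &prot);
//   ... swap the zone's function pointers ...
//   if (start)
//     mach_vm_protect(mach_task_self(), start, length, false, prot);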

// === C malloc/calloc/valloc/realloc/posix_memalign ===

typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone,
                          void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
free_type g_old_free;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
free_type g_old_free_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;

void* oom_killer_malloc(struct _malloc_zone_t* zone,
                        size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_malloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_calloc(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone,
                        size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_valloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free(struct _malloc_zone_t* zone,
                     void* ptr) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  g_old_free(zone, ptr);
}

void* oom_killer_realloc(struct _malloc_zone_t* zone,
                         void* ptr,
                         size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_realloc(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
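  // (alignment & (alignment - 1)) == 0 tests that alignment is a power of
  // two; together with alignment >= sizeof(void*), this mirrors the argument
  // checks that would make posix_memalign() fail with EINVAL rather than
  // ENOMEM.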
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_malloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_calloc_purgeable(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_valloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free_purgeable(struct _malloc_zone_t* zone,
                               void* ptr) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  g_old_free_purgeable(zone, ptr);
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_realloc_purgeable(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_memalign_purgeable(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

// === C++ operator new ===

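// Installed via std::set_new_handler() in EnableTerminationOnOutOfMemory()
// below. operator new invokes the current new-handler when an allocation
// fails, so crashing here turns C++-level OOM into immediate termination.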
void oom_killer_new() {
  debug::BreakDebugger();
}

// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator() {
  return !base::mac::IsOSLaterThanMavericks_DontCallThis();
}
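
// The layouts mirrored by ChromeCFAllocatorLeopards and ChromeCFAllocatorLions
// (third_party/apple_apsl/CFBase.h) track Apple's published CF sources for
// each OS generation; on anything newer than Mavericks the layout is
// unverified, so NULL is returned and the CFAllocator hook is skipped (see
// the NSLog() warning in EnableTerminationOnOutOfMemory()).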

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  if (base::mac::IsOSSnowLeopard()) {
    ChromeCFAllocatorLeopards* our_allocator =
        const_cast<ChromeCFAllocatorLeopards*>(
            reinterpret_cast<const ChromeCFAllocatorLeopards*>(allocator));
    return &our_allocator->_context;
  } else if (base::mac::IsOSLion() ||
             base::mac::IsOSMountainLion() ||
             base::mac::IsOSMavericks()) {
    ChromeCFAllocatorLions* our_allocator =
        const_cast<ChromeCFAllocatorLions*>(
            reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
    return &our_allocator->_context;
  } else {
    return NULL;
  }
}

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    debug::BreakDebugger();
  return result;
}

}  // namespace

void* UncheckedMalloc(size_t size) {
  if (g_old_malloc) {
#if ARCH_CPU_32_BITS
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);
#endif  // ARCH_CPU_32_BITS
    return g_old_malloc(malloc_default_zone(), size);
  }
  return malloc(size);
}
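
// Usage sketch (hypothetical caller): an allocation that is allowed to fail
// without tripping the OOM killer:
//   void* buf = base::UncheckedMalloc(requested_bytes);
//   if (!buf)
//     return false;  // Degrade gracefully instead of terminating.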

void* UncheckedCalloc(size_t num_items, size_t size) {
  if (g_old_calloc) {
#if ARCH_CPU_32_BITS
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);
#endif  // ARCH_CPU_32_BITS
    return g_old_calloc(malloc_default_zone(), num_items, size);
  }
  return calloc(num_items, size);
}

void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger than
  // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
  // still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details).
  // Unfortunately, it's the best we can do. Also note that this does not affect
  // allocations from non-default zones.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());

  mach_vm_address_t default_reprotection_start = 0;
  mach_vm_size_t default_reprotection_length = 0;
  vm_prot_t default_reprotection_value = VM_PROT_NONE;
  DeprotectMallocZone(default_zone,
                      &default_reprotection_start,
                      &default_reprotection_length,
                      &default_reprotection_value);

  mach_vm_address_t purgeable_reprotection_start = 0;
  mach_vm_size_t purgeable_reprotection_length = 0;
  vm_prot_t purgeable_reprotection_value = VM_PROT_NONE;
  if (purgeable_zone) {
    DeprotectMallocZone(purgeable_zone,
                        &purgeable_reprotection_start,
                        &purgeable_reprotection_length,
                        &purgeable_reprotection_value);
  }

  // Default zone

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_free = default_zone->free;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free &&
        g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->free = oom_killer_free;
  default_zone->realloc = oom_killer_realloc;

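  // The memalign entry point only exists in malloc zone version 5 and later,
  // so consult the zone's version field before touching the slot.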
  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }

  // Purgeable zone (if it exists)

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_free_purgeable = purgeable_zone->free;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_free_purgeable &&
          g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->free = oom_killer_free_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  // Restore protection if it was active.

  if (default_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           default_reprotection_start,
                                           default_reprotection_length,
                                           false,
                                           default_reprotection_value);
    CHECK(result == KERN_SUCCESS);
  }

  if (purgeable_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           purgeable_reprotection_start,
                                           purgeable_reprotection_length,
                                           false,
                                           purgeable_reprotection_value);
    CHECK(result == KERN_SUCCESS);
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure is
  // due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free list
  // means that it's likely that a failure will not be due to memory exhaustion.
  // Similarly, these constraints on batch_malloc mean that callers must always
  // be expecting to receive less memory than was requested, even in situations
  // where memory pressure is not a concern. Finally, the only public interface
  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
  // system's malloc implementation. It's unlikely that anyone's even heard of
  // it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch failures
  // that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);

#if !defined(ADDRESS_SANITIZER)
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
        "CFAllocator will not result in termination. http://crbug.com/45650");
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}

}  // namespace base