// process_util_mac.mm revision 731df977c0511bca2206b5f333555b1205ff1f43
1// Copyright (c) 2008 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5
6#include "base/process_util.h"
7
8#import <Cocoa/Cocoa.h>
9#include <crt_externs.h>
10#include <dlfcn.h>
11#include <mach/mach.h>
12#include <mach/mach_init.h>
13#include <mach/task.h>
14#include <malloc/malloc.h>
15#import <objc/runtime.h>
16#include <spawn.h>
17#include <sys/mman.h>
18#include <sys/sysctl.h>
19#include <sys/types.h>
20#include <sys/utsname.h>
21#include <sys/wait.h>
22
23#include <new>
24#include <string>
25
26#include "base/debug_util.h"
27#include "base/eintr_wrapper.h"
28#include "base/logging.h"
29#include "base/string_util.h"
30#include "base/sys_info.h"
31#include "base/sys_string_conversions.h"
32#include "base/time.h"
33#include "third_party/apple_apsl/CFBase.h"
34#include "third_party/apple_apsl/malloc.h"
35
36namespace base {
37
void RestoreDefaultExceptionHandler() {
  // This function is tailored to remove the Breakpad exception handler.
  // kExceptionMask matches s_exception_mask in
  // breakpad/src/client/mac/handler/exception_handler.cc
  const exception_mask_t kExceptionMask =
      EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC |
      EXC_MASK_BREAKPOINT;

  // Setting the exception port to MACH_PORT_NULL may not be entirely
  // kosher to restore the default exception handler, but in practice,
  // it results in the exception port being set to Apple Crash Reporter,
  // the desired behavior.
  task_set_exception_ports(mach_task_self(), kExceptionMask, MACH_PORT_NULL,
                           EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}
54
// Takes a one-time snapshot of the current user's processes via sysctl.
// The snapshot is stored in |kinfo_procs_|; on any unrecoverable failure
// the vector is left empty so iteration simply yields nothing.
ProcessIterator::ProcessIterator(const ProcessFilter* filter)
    : index_of_kinfo_proc_(0),
      filter_(filter) {
  // Get a snapshot of all of my processes (yes, as we loop it can go stale,
  // but trying to find where we were in a constantly changing list is
  // basically impossible).

  // KERN_PROC_UID restricts the listing to processes owned by our
  // effective uid.
  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID, geteuid() };

  // Since more processes could start between when we get the size and when
  // we get the list, we do a loop to keep trying until we get it.
  bool done = false;
  int try_num = 1;
  const int max_tries = 10;
  do {
    // Get the size of the buffer
    size_t len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0) {
      LOG(ERROR) << "failed to get the size needed for the process list";
      kinfo_procs_.resize(0);
      done = true;
    } else {
      size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
      // Leave some spare room for process table growth (more could show up
      // between when we check and now)
      num_of_kinfo_proc += 16;
      kinfo_procs_.resize(num_of_kinfo_proc);
      len = num_of_kinfo_proc * sizeof(struct kinfo_proc);
      // Load the list of processes
      if (sysctl(mib, arraysize(mib), &kinfo_procs_[0], &len, NULL, 0) < 0) {
        // If we get a mem error, it just means we need a bigger buffer, so
        // loop around again.  Anything else is a real error and give up.
        if (errno != ENOMEM) {
          LOG(ERROR) << "failed to get the process list";
          kinfo_procs_.resize(0);
          done = true;
        }
      } else {
        // Got the list, just make sure we're sized exactly right
        size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
        kinfo_procs_.resize(num_of_kinfo_proc);
        done = true;
      }
    }
  } while (!done && (try_num++ < max_tries));

  if (!done) {
    LOG(ERROR) << "failed to collect the process list in a few tries";
    kinfo_procs_.resize(0);
  }
}
106
ProcessIterator::~ProcessIterator() {
  // Nothing to do; |kinfo_procs_| releases its storage automatically.
}
109
// Advances to the next live process in the snapshot, filling |entry_| with
// its pid, ppid, process-group id, executable basename, and tokenized
// command line. Returns false when the snapshot is exhausted. Processes
// whose command lines cannot be fetched are skipped with a logged error.
bool ProcessIterator::CheckForNextProcess() {
  std::string data;
  for (; index_of_kinfo_proc_ < kinfo_procs_.size(); ++index_of_kinfo_proc_) {
    kinfo_proc& kinfo = kinfo_procs_[index_of_kinfo_proc_];

    // Skip processes just awaiting collection
    if ((kinfo.kp_proc.p_pid > 0) && (kinfo.kp_proc.p_stat == SZOMB))
      continue;

    // KERN_PROCARGS returns the concatenated argv of the target pid.
    int mib[] = { CTL_KERN, KERN_PROCARGS, kinfo.kp_proc.p_pid };

    // Find out what size buffer we need.
    size_t data_len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &data_len, NULL, 0) < 0) {
      LOG(ERROR) << "failed to figure out the buffer size for a commandline";
      continue;
    }

    data.resize(data_len);
    if (sysctl(mib, arraysize(mib), &data[0], &data_len, NULL, 0) < 0) {
      LOG(ERROR) << "failed to fetch a commandline";
      continue;
    }

    // |data| contains all the command line parameters of the process, separated
    // by blocks of one or more null characters. We tokenize |data| into a
    // vector of strings using '\0' as a delimiter and populate
    // |entry_.cmd_line_args_|.
    std::string delimiters;
    delimiters.push_back('\0');
    Tokenize(data, delimiters, &entry_.cmd_line_args_);

    // |data| starts with the full executable path followed by a null character.
    // We search for the first instance of '\0' and extract everything before it
    // to populate |entry_.exe_file_|.
    size_t exec_name_end = data.find('\0');
    if (exec_name_end == std::string::npos) {
      LOG(ERROR) << "command line data didn't match expected format";
      continue;
    }

    entry_.pid_ = kinfo.kp_proc.p_pid;
    entry_.ppid_ = kinfo.kp_eproc.e_ppid;
    entry_.gid_ = kinfo.kp_eproc.e_pgid;
    // Keep only the basename of the executable path (text after the last
    // '/', or the whole path if it contains no slash).
    size_t last_slash = data.rfind('/', exec_name_end);
    if (last_slash == std::string::npos)
      entry_.exe_file_.assign(data, 0, exec_name_end);
    else
      entry_.exe_file_.assign(data, last_slash + 1,
                              exec_name_end - last_slash - 1);
    // Start w/ the next entry next time through
    ++index_of_kinfo_proc_;
    // Done
    return true;
  }
  return false;
}
167
// Accept only entries whose executable basename matches |executable_name_|,
// then defer to the base-class filter.
bool NamedProcessIterator::IncludeEntry() {
  if (base::SysWideToUTF8(executable_name_) != entry().exe_file())
    return false;
  return ProcessIterator::IncludeEntry();
}
172
173
174// ------------------------------------------------------------------------
175// NOTE: about ProcessMetrics
176//
177// Getting a mach task from a pid for another process requires permissions in
178// general, so there doesn't really seem to be a way to do these (and spinning
179// up ps to fetch each stats seems dangerous to put in a base api for anyone to
180// call). Child processes ipc their port, so return something if available,
181// otherwise return 0.
182//
183
// |port_provider| may be NULL; without it, metrics are only available for
// the current process (see the note above about ProcessMetrics).
ProcessMetrics::ProcessMetrics(ProcessHandle process,
                               ProcessMetrics::PortProvider* port_provider)
    : process_(process),
      last_time_(0),
      last_system_time_(0),
      port_provider_(port_provider) {
  // Cache the processor count at construction time.
  processor_count_ = base::SysInfo::NumberOfProcessors();
}
192
// static
// Factory; the caller takes ownership of the returned ProcessMetrics.
ProcessMetrics* ProcessMetrics::CreateProcessMetrics(
    ProcessHandle process,
    ProcessMetrics::PortProvider* port_provider) {
  return new ProcessMetrics(process, port_provider);
}
199
bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
  // Not implemented on Mac OS X; always fails and leaves |io_counters|
  // untouched.
  return false;
}
203
// Fills |task_info_data| with TASK_BASIC_INFO_64 for |task|. Returns false
// for MACH_PORT_NULL or when task_info() fails.
static bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
  if (task == MACH_PORT_NULL)
    return false;
  mach_msg_type_number_t info_count = TASK_BASIC_INFO_64_COUNT;
  const kern_return_t result =
      task_info(task,
                TASK_BASIC_INFO_64,
                reinterpret_cast<task_info_t>(task_info_data),
                &info_count);
  // Most likely cause for failure: |task| is a zombie.
  return result == KERN_SUCCESS;
}
215
// Returns the task's virtual size in bytes, or 0 if no task port is
// available (see the note above about ProcessMetrics).
size_t ProcessMetrics::GetPagefileUsage() const {
  task_basic_info_64 info;
  if (GetTaskInfo(TaskForPid(process_), &info))
    return info.virtual_size;
  return 0;
}
222
size_t ProcessMetrics::GetPeakPagefileUsage() const {
  // Peak usage is not tracked on Mac OS X; 0 means "unavailable".
  return 0;
}
226
// Returns the task's resident size in bytes, or 0 if no task port is
// available (see the note above about ProcessMetrics).
size_t ProcessMetrics::GetWorkingSetSize() const {
  task_basic_info_64 info;
  if (GetTaskInfo(TaskForPid(process_), &info))
    return info.resident_size;
  return 0;
}
233
size_t ProcessMetrics::GetPeakWorkingSetSize() const {
  // Peak usage is not tracked on Mac OS X; 0 means "unavailable".
  return 0;
}
237
// OSX appears to use a different system to get its memory; this
// implementation just zeroes whichever out-params were supplied and
// reports success.
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
                                    size_t* shared_bytes) {
  if (shared_bytes != NULL)
    *shared_bytes = 0;
  if (private_bytes != NULL)
    *private_bytes = 0;
  return true;
}
247
void ProcessMetrics::GetCommittedKBytes(CommittedKBytes* usage) const {
  // Not implemented on Mac OS X; |usage| is left unmodified.
}
250
// Reports the resident size (in KB) as the private working set; shared and
// shareable figures are not available here and are reported as 0. Returns
// false when the resident size cannot be determined.
bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
  const size_t resident_bytes = GetWorkingSetSize();
  if (resident_bytes == 0)
    return false;
  ws_usage->priv = resident_bytes / 1024;
  ws_usage->shared = 0;
  ws_usage->shareable = 0;
  return true;
}
260
// Converts a mach time_value_t (*a) into a struct timeval (*r).
#define TIME_VALUE_TO_TIMEVAL(a, r) do {  \
  (r)->tv_sec = (a)->seconds;             \
  (r)->tv_usec = (a)->microseconds;       \
} while (0)

// Returns CPU usage as a percentage of one CPU over the wall-clock interval
// since the previous call. The first call only primes the saved times and
// returns 0. Returns 0 on any failure (no task port, zombie task, etc.).
double ProcessMetrics::GetCPUUsage() {
  mach_port_t task = TaskForPid(process_);
  if (task == MACH_PORT_NULL)
    return 0;

  kern_return_t kr;

  // Libtop explicitly loops over the threads (libtop_pinfo_update_cpu_usage()
  // in libtop.c), but this is more concise and gives the same results:
  task_thread_times_info thread_info_data;
  mach_msg_type_number_t thread_info_count = TASK_THREAD_TIMES_INFO_COUNT;
  kr = task_info(task,
                 TASK_THREAD_TIMES_INFO,
                 reinterpret_cast<task_info_t>(&thread_info_data),
                 &thread_info_count);
  if (kr != KERN_SUCCESS) {
    // Most likely cause: |task| is a zombie.
    return 0;
  }

  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(task, &task_info_data))
    return 0;

  /* Set total_time. */
  // thread info contains live time...
  struct timeval user_timeval, system_timeval, task_timeval;
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &system_timeval, &task_timeval);

  // ... task info contains terminated time.
  TIME_VALUE_TO_TIMEVAL(&task_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&task_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &task_timeval, &task_timeval);
  timeradd(&system_timeval, &task_timeval, &task_timeval);

  struct timeval now;
  int retval = gettimeofday(&now, NULL);
  if (retval)
    return 0;

  int64 time = TimeValToMicroseconds(now);
  int64 task_time = TimeValToMicroseconds(task_timeval);

  if ((last_system_time_ == 0) || (last_time_ == 0)) {
    // First call, just set the last values.
    last_system_time_ = task_time;
    last_time_ = time;
    return 0;
  }

  int64 system_time_delta = task_time - last_system_time_;
  int64 time_delta = time - last_time_;
  DCHECK(time_delta != 0);
  if (time_delta == 0)
    return 0;

  // CPU time consumed as a percentage of the elapsed wall-clock interval;
  // can exceed 100 on multi-processor machines.
  double cpu = static_cast<double>((system_time_delta * 100.0) / time_delta);

  last_system_time_ = task_time;
  last_time_ = time;

  return cpu;
}
332
// Returns a mach task port for the process being measured, or
// MACH_PORT_NULL if none is available: the port provider is consulted
// first, then mach_task_self() if we are measuring ourselves.
// NOTE(review): the |process| parameter is ignored; the member |process_| is
// used throughout. Confirm callers always pass process_ here.
mach_port_t ProcessMetrics::TaskForPid(ProcessHandle process) const {
  mach_port_t task = MACH_PORT_NULL;
  if (port_provider_)
    task = port_provider_->TaskForPid(process_);
  if (task == MACH_PORT_NULL && process_ == getpid())
    task = mach_task_self();
  return task;
}
341
342// ------------------------------------------------------------------------
343
// Bytes committed by the system, in KB, approximated by the number of
// active VM pages times the page size. Returns 0 on failure.
size_t GetSystemCommitCharge() {
  host_name_port_t host = mach_host_self();
  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
  vm_statistics_data_t data;
  kern_return_t kr = host_statistics(host, HOST_VM_INFO,
                                     reinterpret_cast<host_info_t>(&data),
                                     &count);
  if (kr != KERN_SUCCESS) {
    LOG(WARNING) << "Failed to fetch host statistics.";
    // Release the send right obtained from mach_host_self() before bailing
    // out; otherwise every failed call leaks a port reference.
    mach_port_deallocate(mach_task_self(), host);
    return 0;
  }

  vm_size_t page_size;
  kr = host_page_size(host, &page_size);
  // The host port is no longer needed on either path below.
  mach_port_deallocate(mach_task_self(), host);
  if (kr != KERN_SUCCESS) {
    LOG(ERROR) << "Failed to fetch host page size.";
    return 0;
  }

  return (data.active_count * page_size) / 1024;
}
366
367// ------------------------------------------------------------------------
368
369namespace {
370
371bool g_oom_killer_enabled;
372
373// === C malloc/calloc/valloc/realloc/posix_memalign ===
374
375typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
376                             size_t size);
377typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
378                             size_t num_items,
379                             size_t size);
380typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
381                             size_t size);
382typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
383                              void* ptr,
384                              size_t size);
385typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
386                               size_t alignment,
387                               size_t size);
388
389malloc_type g_old_malloc;
390calloc_type g_old_calloc;
391valloc_type g_old_valloc;
392realloc_type g_old_realloc;
393memalign_type g_old_memalign;
394
395malloc_type g_old_malloc_purgeable;
396calloc_type g_old_calloc_purgeable;
397valloc_type g_old_valloc_purgeable;
398realloc_type g_old_realloc_purgeable;
399memalign_type g_old_memalign_purgeable;
400
// malloc hook: crash instead of returning NULL for a nonzero request.
void* oom_killer_malloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* allocation = g_old_malloc(zone, size);
  if (size && !allocation)
    DebugUtil::BreakDebugger();
  return allocation;
}
408
// calloc hook: crash instead of returning NULL for a nonzero request.
void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* allocation = g_old_calloc(zone, num_items, size);
  if (num_items && size && !allocation)
    DebugUtil::BreakDebugger();
  return allocation;
}
417
// valloc hook: crash instead of returning NULL for a nonzero request.
void* oom_killer_valloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* allocation = g_old_valloc(zone, size);
  if (size && !allocation)
    DebugUtil::BreakDebugger();
  return allocation;
}
425
// realloc hook: crash instead of returning NULL for a nonzero request.
void* oom_killer_realloc(struct _malloc_zone_t* zone,
                         void* ptr,
                         size_t size) {
  void* allocation = g_old_realloc(zone, ptr, size);
  if (size && !allocation)
    DebugUtil::BreakDebugger();
  return allocation;
}
434
// memalign hook: crash only on genuine out-of-memory conditions.
void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* allocation = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  const bool alignment_is_valid =
      alignment >= sizeof(void*) && (alignment & (alignment - 1)) == 0;
  if (!allocation && size && alignment_is_valid) {
    DebugUtil::BreakDebugger();
  }
  return allocation;
}
448
// Purgeable-zone malloc hook: crash instead of returning NULL.
void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* allocation = g_old_malloc_purgeable(zone, size);
  if (size && !allocation)
    DebugUtil::BreakDebugger();
  return allocation;
}
456
// Purgeable-zone calloc hook: crash instead of returning NULL.
void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* allocation = g_old_calloc_purgeable(zone, num_items, size);
  if (num_items && size && !allocation)
    DebugUtil::BreakDebugger();
  return allocation;
}
465
// Purgeable-zone valloc hook: crash instead of returning NULL.
void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* allocation = g_old_valloc_purgeable(zone, size);
  if (size && !allocation)
    DebugUtil::BreakDebugger();
  return allocation;
}
473
// Purgeable-zone realloc hook: crash instead of returning NULL.
void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* allocation = g_old_realloc_purgeable(zone, ptr, size);
  if (size && !allocation)
    DebugUtil::BreakDebugger();
  return allocation;
}
482
// Purgeable-zone memalign hook: crash only on genuine out-of-memory.
void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* allocation = g_old_memalign_purgeable(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  const bool alignment_is_valid =
      alignment >= sizeof(void*) && (alignment & (alignment - 1)) == 0;
  if (!allocation && size && alignment_is_valid) {
    DebugUtil::BreakDebugger();
  }
  return allocation;
}
496
497// === C++ operator new ===
498
// Installed via std::set_new_handler in EnableTerminationOnOutOfMemory;
// any operator new failure lands here and crashes immediately.
void oom_killer_new() {
  DebugUtil::BreakDebugger();
}
502
503// === Core Foundation CFAllocators ===
504
505typedef ChromeCFAllocator* ChromeCFAllocatorRef;
506
507CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
508CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
509CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
510
// kCFAllocatorSystemDefault hook: crash instead of returning NULL.
void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* allocation = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (allocation == NULL)
    DebugUtil::BreakDebugger();
  return allocation;
}
519
// kCFAllocatorMalloc hook: crash instead of returning NULL.
void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* allocation = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (allocation == NULL)
    DebugUtil::BreakDebugger();
  return allocation;
}
528
// kCFAllocatorMallocZone hook: crash instead of returning NULL.
void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* allocation = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (allocation == NULL)
    DebugUtil::BreakDebugger();
  return allocation;
}
537
538// === Cocoa NSObject allocation ===
539
540typedef id (*allocWithZone_t)(id, SEL, NSZone*);
541allocWithZone_t g_old_allocWithZone;
542
// +[NSObject allocWithZone:] replacement IMP: crash instead of returning
// nil.
id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id allocation = g_old_allocWithZone(self, _cmd, zone);
  if (!allocation)
    DebugUtil::BreakDebugger();
  return allocation;
}
550
551}  // namespace
552
// Returns the default purgeable malloc zone, or NULL on systems that lack
// malloc_default_purgeable_zone.
malloc_zone_t* GetPurgeableZone() {
  // malloc_default_purgeable_zone only exists on >= 10.6. Use dlsym to grab it
  // at runtime because it may not be present in the SDK used for compilation.
  typedef malloc_zone_t* (*malloc_default_purgeable_zone_t)(void);
  malloc_default_purgeable_zone_t purgeable_zone_fn =
      reinterpret_cast<malloc_default_purgeable_zone_t>(
          dlsym(RTLD_DEFAULT, "malloc_default_purgeable_zone"));
  return purgeable_zone_fn ? purgeable_zone_fn() : NULL;
}
564
// Installs hooks so that genuine out-of-memory conditions in malloc-family
// calls, operator new, the system CFAllocators, and +[NSObject
// allocWithZone:] crash the process (via DebugUtil::BreakDebugger) instead
// of returning NULL/nil. Idempotent: subsequent calls are no-ops.
void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // Not SysInfo::OperatingSystemVersionNumbers as that calls through to Gestalt
  // which ends up (on > 10.6) spawning threads.
  struct utsname machine_info;
  if (uname(&machine_info)) {
    return;
  }

  // The string machine_info.release is the xnu/Darwin version number, "9.xxx"
  // on Mac OS X 10.5, and "10.xxx" on Mac OS X 10.6. See
  // http://en.wikipedia.org/wiki/Darwin_(operating_system) .
  long darwin_version = strtol(machine_info.release, NULL, 10);

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger than
  // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
  // still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details).
  // Unfortunately, it's the best we can do. Also note that this does not affect
  // allocations from non-default zones.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

  // See http://trac.webkit.org/changeset/53362/trunk/WebKitTools/DumpRenderTree/mac
  bool zone_allocators_protected = darwin_version > 10;

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(GetPurgeableZone());

  // NOTE(review): NULL is assigned to the integral vm_address_t values
  // below; 0 would be the more accurate spelling.
  vm_address_t page_start_default = NULL;
  vm_address_t page_start_purgeable = NULL;
  vm_size_t len_default = 0;
  vm_size_t len_purgeable = 0;
  if (zone_allocators_protected) {
    // Make the pages containing the zone structures writable so the
    // function pointers below can be swapped; re-protected at the end.
    page_start_default = reinterpret_cast<vm_address_t>(default_zone) &
        static_cast<vm_size_t>(~(getpagesize() - 1));
    len_default = reinterpret_cast<vm_address_t>(default_zone) -
        page_start_default + sizeof(ChromeMallocZone);
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ | PROT_WRITE);

    if (purgeable_zone) {
      page_start_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) &
          static_cast<vm_size_t>(~(getpagesize() - 1));
      len_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) -
          page_start_purgeable + sizeof(ChromeMallocZone);
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ | PROT_WRITE);
    }
  }

  // Default zone

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->realloc = oom_killer_realloc;

  // memalign only exists in zone version 5 and later.
  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }

  // Purgeable zone (if it exists)

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  // Restore read-only protection on the zone structures.
  if (zone_allocators_protected) {
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ);
    if (purgeable_zone) {
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ);
    }
  }

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure is
  // due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free list
  // means that it's likely that a failure will not be due to memory exhaustion.
  // Similarly, these constraints on batch_malloc mean that callers must always
  // be expecting to receive less memory than was requested, even in situations
  // where memory pressure is not a concern. Finally, the only public interface
  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
  // system's malloc implementation. It's unlikely that anyone's even heard of
  // it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch failures
  // that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);

  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  // The CFAllocator struct layout is only known for Darwin 9 (10.5) and
  // 10 (10.6); poking at _context on other versions would be unsafe.
  bool cf_allocator_internals_known =
      darwin_version == 9 || darwin_version == 10;

  if (cf_allocator_internals_known) {
    ChromeCFAllocatorRef allocator = const_cast<ChromeCFAllocatorRef>(
        reinterpret_cast<const ChromeCFAllocator*>(kCFAllocatorSystemDefault));
    g_old_cfallocator_system_default = allocator->_context.allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    allocator->_context.allocate = oom_killer_cfallocator_system_default;

    allocator = const_cast<ChromeCFAllocatorRef>(
        reinterpret_cast<const ChromeCFAllocator*>(kCFAllocatorMalloc));
    g_old_cfallocator_malloc = allocator->_context.allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    allocator->_context.allocate = oom_killer_cfallocator_malloc;

    allocator = const_cast<ChromeCFAllocatorRef>(
        reinterpret_cast<const ChromeCFAllocator*>(kCFAllocatorMallocZone));
    g_old_cfallocator_malloc_zone = allocator->_context.allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    allocator->_context.allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
        "CFAllocator will not result in termination. http://crbug.com/45650");
  }

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
762
763}  // namespace base
764