process_util_mac.mm revision c407dc5cd9bdc5668497f21b26b09d988ab439de
// Copyright (c) 2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/process_util.h"

#import <Cocoa/Cocoa.h>
#include <crt_externs.h>
#include <dlfcn.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <malloc/malloc.h>
#import <objc/runtime.h>
#include <spawn.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/wait.h>

#include <new>
#include <string>

#include "base/debug_util.h"
#include "base/eintr_wrapper.h"
#include "base/logging.h"
#include "base/string_util.h"
#include "base/sys_info.h"
#include "base/sys_string_conversions.h"
#include "base/time.h"
namespace base {

void RestoreDefaultExceptionHandler() {
  // This function is tailored to remove the Breakpad exception handler.
  // exception_mask matches s_exception_mask in
  // breakpad/src/client/mac/handler/exception_handler.cc
  const exception_mask_t exception_mask = EXC_MASK_BAD_ACCESS |
                                          EXC_MASK_BAD_INSTRUCTION |
                                          EXC_MASK_ARITHMETIC |
                                          EXC_MASK_BREAKPOINT;

  // Setting the exception port to MACH_PORT_NULL may not be an entirely
  // kosher way to restore the default exception handler, but in practice,
  // it results in the exception port being set to Apple Crash Reporter,
  // the desired behavior.
  task_set_exception_ports(mach_task_self(), exception_mask, MACH_PORT_NULL,
                           EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}

ProcessIterator::ProcessIterator(const ProcessFilter* filter)
    : index_of_kinfo_proc_(0),
      filter_(filter) {
  // Get a snapshot of all of my processes.  (Yes, the list can go stale as
  // we loop over it, but trying to track a position in a constantly changing
  // list is basically impossible.)

  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID, geteuid() };

  // Since more processes could start between when we get the size and when
  // we get the list, we do a loop to keep trying until we get it.
  bool done = false;
  int try_num = 1;
  const int max_tries = 10;
  do {
    // Get the size of the buffer.
    size_t len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0) {
      LOG(ERROR) << "failed to get the size needed for the process list";
      kinfo_procs_.resize(0);
      done = true;
    } else {
      size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
      // Leave some spare room for process table growth (more processes could
      // show up between the size query above and the fetch below).
      num_of_kinfo_proc += 16;
      kinfo_procs_.resize(num_of_kinfo_proc);
      len = num_of_kinfo_proc * sizeof(struct kinfo_proc);
      // Load the list of processes.
      if (sysctl(mib, arraysize(mib), &kinfo_procs_[0], &len, NULL, 0) < 0) {
        // If we get a memory error, it just means we need a bigger buffer, so
        // loop around again.  Anything else is a real error, so give up.
        if (errno != ENOMEM) {
          LOG(ERROR) << "failed to get the process list";
          kinfo_procs_.resize(0);
          done = true;
        }
      } else {
        // Got the list; just make sure we're sized exactly right.
        size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
        kinfo_procs_.resize(num_of_kinfo_proc);
        done = true;
      }
    }
  } while (!done && (try_num++ < max_tries));

  if (!done) {
    LOG(ERROR) << "failed to collect the process list in a few tries";
    kinfo_procs_.resize(0);
  }
}
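
// Illustrative usage sketch, not code from this file: callers are expected to
// drive iteration via the NextProcessEntry() accessor declared in
// process_util.h, which calls CheckForNextProcess() below.  The constructor
// signature is an assumption inferred from the wide-string executable_name_
// used in NamedProcessIterator::IncludeEntry() further down.
//
//   base::NamedProcessIterator it(L"Chromium", NULL /* no filter */);
//   while (const base::ProcessEntry* entry = it.NextProcessEntry()) {
//     // Use entry->exe_file() and friends here.
//   }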

ProcessIterator::~ProcessIterator() {
}

bool ProcessIterator::CheckForNextProcess() {
  std::string data;
  for (; index_of_kinfo_proc_ < kinfo_procs_.size(); ++index_of_kinfo_proc_) {
    kinfo_proc& kinfo = kinfo_procs_[index_of_kinfo_proc_];

    // Skip processes just awaiting collection.
    if ((kinfo.kp_proc.p_pid > 0) && (kinfo.kp_proc.p_stat == SZOMB))
      continue;

    int mib[] = { CTL_KERN, KERN_PROCARGS, kinfo.kp_proc.p_pid };

    // Find out what size buffer we need.
    size_t data_len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &data_len, NULL, 0) < 0) {
      LOG(ERROR) << "failed to figure out the buffer size for a commandline";
      continue;
    }

    data.resize(data_len);
    if (sysctl(mib, arraysize(mib), &data[0], &data_len, NULL, 0) < 0) {
      LOG(ERROR) << "failed to fetch a commandline";
      continue;
    }

    // The data starts with the full executable path, null-terminated (e.g.
    // "/sbin/launchd\0..."), so extract just the executable name from the
    // path.
    size_t exec_name_end = data.find('\0');
    if (exec_name_end == std::string::npos) {
      LOG(ERROR) << "command line data didn't match expected format";
      continue;
    }
    entry_.pid_ = kinfo.kp_proc.p_pid;
    entry_.ppid_ = kinfo.kp_eproc.e_ppid;
    entry_.gid_ = kinfo.kp_eproc.e_pgid;
    size_t last_slash = data.rfind('/', exec_name_end);
    if (last_slash == std::string::npos)
      entry_.exe_file_.assign(data, 0, exec_name_end);
    else
      entry_.exe_file_.assign(data, last_slash + 1,
                              exec_name_end - last_slash - 1);
    // Start with the next entry next time through.
    ++index_of_kinfo_proc_;
    // Done.
    return true;
  }
  return false;
}

bool NamedProcessIterator::IncludeEntry() {
  return (base::SysWideToUTF8(executable_name_) == entry().exe_file() &&
          ProcessIterator::IncludeEntry());
}

// ------------------------------------------------------------------------
// NOTE: about ProcessMetrics
//
// Getting the Mach task for another process's pid generally requires special
// permissions, so there doesn't seem to be a clean way to collect these stats
// (and spinning up ps to fetch stats for each process seems too dangerous to
// put in a base API for anyone to call). Child processes send their task
// ports over IPC, so return something if a port is available, and 0
// otherwise.
//
ProcessMetrics::ProcessMetrics(ProcessHandle process,
                               ProcessMetrics::PortProvider* port_provider)
    : process_(process),
      last_time_(0),
      last_system_time_(0),
      port_provider_(port_provider) {
  processor_count_ = base::SysInfo::NumberOfProcessors();
}

// static
ProcessMetrics* ProcessMetrics::CreateProcessMetrics(
    ProcessHandle process,
    ProcessMetrics::PortProvider* port_provider) {
  return new ProcessMetrics(process, port_provider);
}
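
// Illustrative usage sketch, with assumptions: for the current process no
// PortProvider is needed, because TaskForPid() below falls back to
// mach_task_self() when the handle is getpid().  scoped_ptr is assumed from
// base/scoped_ptr.h.
//
//   scoped_ptr<base::ProcessMetrics> metrics(
//       base::ProcessMetrics::CreateProcessMetrics(getpid(), NULL));
//   metrics->GetCPUUsage();  // First call only establishes a baseline.
//   size_t resident_bytes = metrics->GetWorkingSetSize();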

bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
  return false;
}

static bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
  if (task == MACH_PORT_NULL)
    return false;
  mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
  kern_return_t kr = task_info(task,
                               TASK_BASIC_INFO_64,
                               reinterpret_cast<task_info_t>(task_info_data),
                               &count);
  // Most likely cause for failure: |task| is a zombie.
  return kr == KERN_SUCCESS;
}

size_t ProcessMetrics::GetPagefileUsage() const {
  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
    return 0;
  return task_info_data.virtual_size;
}

size_t ProcessMetrics::GetPeakPagefileUsage() const {
  return 0;
}

size_t ProcessMetrics::GetWorkingSetSize() const {
  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
    return 0;
  return task_info_data.resident_size;
}

size_t ProcessMetrics::GetPeakWorkingSetSize() const {
  return 0;
}

// Mac OS X accounts for memory differently from other platforms: private and
// shared byte counts are not readily available, so report zero for both.
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
                                    size_t* shared_bytes) {
  if (private_bytes)
    *private_bytes = 0;
  if (shared_bytes)
    *shared_bytes = 0;
  return true;
}

void ProcessMetrics::GetCommittedKBytes(CommittedKBytes* usage) const {
}

bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
  size_t priv = GetWorkingSetSize();
  if (!priv)
    return false;
  ws_usage->priv = priv / 1024;
  ws_usage->shareable = 0;
  ws_usage->shared = 0;
  return true;
}

#define TIME_VALUE_TO_TIMEVAL(a, r) do {  \
  (r)->tv_sec = (a)->seconds;             \
  (r)->tv_usec = (a)->microseconds;       \
} while (0)

double ProcessMetrics::GetCPUUsage() {
  mach_port_t task = TaskForPid(process_);
  if (task == MACH_PORT_NULL)
    return 0;

  kern_return_t kr;

  // Libtop explicitly loops over the threads (libtop_pinfo_update_cpu_usage()
  // in libtop.c), but this is more concise and gives the same results:
  task_thread_times_info thread_info_data;
  mach_msg_type_number_t thread_info_count = TASK_THREAD_TIMES_INFO_COUNT;
  kr = task_info(task,
                 TASK_THREAD_TIMES_INFO,
                 reinterpret_cast<task_info_t>(&thread_info_data),
                 &thread_info_count);
  if (kr != KERN_SUCCESS) {
    // Most likely cause: |task| is a zombie.
    return 0;
  }

  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(task, &task_info_data))
    return 0;

  // Set total_time: the thread info contains the time of live threads...
  struct timeval user_timeval, system_timeval, task_timeval;
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &system_timeval, &task_timeval);

  // ... while the task info contains the time of terminated threads.
  TIME_VALUE_TO_TIMEVAL(&task_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&task_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &task_timeval, &task_timeval);
  timeradd(&system_timeval, &task_timeval, &task_timeval);

  struct timeval now;
  int retval = gettimeofday(&now, NULL);
  if (retval)
    return 0;

  int64 time = TimeValToMicroseconds(now);
  int64 task_time = TimeValToMicroseconds(task_timeval);

  if ((last_system_time_ == 0) || (last_time_ == 0)) {
    // First call: just record the current values as a baseline.
    last_system_time_ = task_time;
    last_time_ = time;
    return 0;
  }

  int64 system_time_delta = task_time - last_system_time_;
  int64 time_delta = time - last_time_;
  DCHECK(time_delta != 0);
  if (time_delta == 0)
    return 0;

  // CPU time consumed as a percentage of wall-clock time elapsed; this can
  // exceed 100 when multiple threads run on multiple cores.
  double cpu = (system_time_delta * 100.0) / time_delta;

  last_system_time_ = task_time;
  last_time_ = time;

  return cpu;
}
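
// A worked example of the calculation above, with made-up numbers: if the
// task accumulated 1,500,000 us of combined user and system CPU time between
// two calls made 1,000,000 us of wall-clock time apart, GetCPUUsage() returns
// (1500000 * 100.0) / 1000000 = 150.0, i.e. one and a half cores' worth of
// work.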

mach_port_t ProcessMetrics::TaskForPid(ProcessHandle process) const {
  mach_port_t task = MACH_PORT_NULL;
  if (port_provider_)
    task = port_provider_->TaskForPid(process_);
  if (task == MACH_PORT_NULL && process_ == getpid())
    task = mach_task_self();
  return task;
}
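
// Minimal sketch of a PortProvider implementation, assuming only the
// TaskForPid() interface used above; the map-based storage is illustrative,
// not code from this file.  A browser process would fill the map with the
// task ports its children send over IPC, per the NOTE above ProcessMetrics.
//
//   class MapPortProvider : public base::ProcessMetrics::PortProvider {
//    public:
//     virtual mach_port_t TaskForPid(base::ProcessHandle pid) const {
//       std::map<base::ProcessHandle, mach_port_t>::const_iterator it =
//           ports_.find(pid);
//       return it == ports_.end() ? MACH_PORT_NULL : it->second;
//     }
//     void AddPort(base::ProcessHandle pid, mach_port_t port) {
//       ports_[pid] = port;
//     }
//    private:
//     std::map<base::ProcessHandle, mach_port_t> ports_;
//   };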

// ------------------------------------------------------------------------

// Returns the amount of memory committed by the system, in KB.
size_t GetSystemCommitCharge() {
  host_name_port_t host = mach_host_self();
  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
  vm_statistics_data_t data;
  kern_return_t kr = host_statistics(host, HOST_VM_INFO,
                                     reinterpret_cast<host_info_t>(&data),
                                     &count);
  if (kr != KERN_SUCCESS) {
    LOG(WARNING) << "Failed to fetch host statistics.";
    return 0;
  }

  vm_size_t page_size;
  kr = host_page_size(host, &page_size);
  if (kr != KERN_SUCCESS) {
    LOG(ERROR) << "Failed to fetch host page size.";
    return 0;
  }

  return (data.active_count * page_size) / 1024;
}

// ------------------------------------------------------------------------

namespace {

bool g_oom_killer_enabled;

// === C malloc/calloc/valloc/realloc/posix_memalign ===

// The extended version of malloc_zone_t from the 10.6 SDK's <malloc/malloc.h>,
// included here to allow for compilation against the 10.5 SDK. (10.5 has
// version 3 zone allocators, while 10.6 has version 6 allocators.)
struct ChromeMallocZone {
  void* reserved1;
  void* reserved2;
  size_t (*size)(struct _malloc_zone_t* zone, const void* ptr);
  void* (*malloc)(struct _malloc_zone_t* zone, size_t size);
  void* (*calloc)(struct _malloc_zone_t* zone, size_t num_items, size_t size);
  void* (*valloc)(struct _malloc_zone_t* zone, size_t size);
  void (*free)(struct _malloc_zone_t* zone, void* ptr);
  void* (*realloc)(struct _malloc_zone_t* zone, void* ptr, size_t size);
  void (*destroy)(struct _malloc_zone_t* zone);
  const char* zone_name;
  unsigned (*batch_malloc)(struct _malloc_zone_t* zone, size_t size,
                           void** results, unsigned num_requested);
  void (*batch_free)(struct _malloc_zone_t* zone, void** to_be_freed,
                     unsigned num_to_be_freed);
  struct malloc_introspection_t* introspect;
  unsigned version;
  void* (*memalign)(struct _malloc_zone_t* zone, size_t alignment,
                    size_t size);  // version >= 5
  void (*free_definite_size)(struct _malloc_zone_t* zone, void* ptr,
                             size_t size);  // version >= 6
};

typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;

void* oom_killer_malloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* result = g_old_malloc(zone, size);
  if (!result && size)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* result = g_old_calloc(zone, num_items, size);
  if (!result && num_items && size)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* result = g_old_valloc(zone, size);
  if (!result && size)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_realloc(struct _malloc_zone_t* zone,
                         void* ptr,
                         size_t size) {
  void* result = g_old_realloc(zone, ptr, size);
  if (!result && size)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* result = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    DebugUtil::BreakDebugger();
  }
  return result;
}

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* result = g_old_malloc_purgeable(zone, size);
  if (!result && size)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* result = g_old_calloc_purgeable(zone, num_items, size);
  if (!result && num_items && size)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* result = g_old_valloc_purgeable(zone, size);
  if (!result && size)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* result = g_old_realloc_purgeable(zone, ptr, size);
  if (!result && size)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* result = g_old_memalign_purgeable(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    DebugUtil::BreakDebugger();
  }
  return result;
}

// === C++ operator new ===

void oom_killer_new() {
  DebugUtil::BreakDebugger();
}

// === Core Foundation CFAllocators ===

// This is the real structure of a CFAllocatorRef behind the scenes. See
// http://opensource.apple.com/source/CF/CF-476.19/CFBase.c (10.5.8) and
// http://opensource.apple.com/source/CF/CF-550/CFBase.c (10.6) for details.
struct ChromeCFRuntimeBase {
  uintptr_t _cfisa;
  uint8_t _cfinfo[4];
#if __LP64__
  uint32_t _rc;
#endif
};

struct ChromeCFAllocator {
  ChromeCFRuntimeBase cf_runtime_base;
  size_t (*size)(struct _malloc_zone_t* zone, const void* ptr);
  void* (*malloc)(struct _malloc_zone_t* zone, size_t size);
  void* (*calloc)(struct _malloc_zone_t* zone, size_t num_items, size_t size);
  void* (*valloc)(struct _malloc_zone_t* zone, size_t size);
  void (*free)(struct _malloc_zone_t* zone, void* ptr);
  void* (*realloc)(struct _malloc_zone_t* zone, void* ptr, size_t size);
  void (*destroy)(struct _malloc_zone_t* zone);
  const char* zone_name;
  unsigned (*batch_malloc)(struct _malloc_zone_t* zone, size_t size,
                           void** results, unsigned num_requested);
  void (*batch_free)(struct _malloc_zone_t* zone, void** to_be_freed,
                     unsigned num_to_be_freed);
  struct malloc_introspection_t* introspect;
  void* reserved5;

  void* allocator;
  CFAllocatorContext context;
};
typedef ChromeCFAllocator* ChromeCFAllocatorRef;

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    DebugUtil::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    DebugUtil::BreakDebugger();
  return result;
}

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    DebugUtil::BreakDebugger();
  return result;
}

}  // namespace

malloc_zone_t* GetPurgeableZone() {
  // malloc_default_purgeable_zone() only exists on >= 10.6. Use dlsym to grab
  // it at runtime, because it may not be present in the SDK used for
  // compilation.
  typedef malloc_zone_t* (*malloc_default_purgeable_zone_t)(void);
  malloc_default_purgeable_zone_t malloc_purgeable_zone =
      reinterpret_cast<malloc_default_purgeable_zone_t>(
          dlsym(RTLD_DEFAULT, "malloc_default_purgeable_zone"));
  if (malloc_purgeable_zone)
    return malloc_purgeable_zone();
  return NULL;
}
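
// The dlsym() guard above generalizes to any function that may be missing at
// runtime.  A sketch, where the symbol name is a placeholder and not a real
// API:
//
//   typedef int (*optional_fn_t)(int);
//   optional_fn_t fn = reinterpret_cast<optional_fn_t>(
//       dlsym(RTLD_DEFAULT, "some_optional_function"));
//   int value = fn ? fn(42) : -1;  // Fall back when the symbol is absent.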

void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  int32 os_major;
  int32 os_minor;
  int32 os_bugfix;
  SysInfo::OperatingSystemVersionNumbers(&os_major, &os_minor, &os_bugfix);

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect: requests for amounts of memory larger than
  // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
  // still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for
  // details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

  // On 10.7 and later, the zone structures live on read-only pages and must
  // be made writable before they can be patched. See
  // http://trac.webkit.org/changeset/53362/trunk/WebKitTools/DumpRenderTree/mac
  bool zone_allocators_protected =
      ((os_major == 10 && os_minor > 6) || os_major > 10);

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(GetPurgeableZone());

  vm_address_t page_start_default = 0;
  vm_address_t page_start_purgeable = 0;
  vm_size_t len_default = 0;
  vm_size_t len_purgeable = 0;
  if (zone_allocators_protected) {
    // Make the page(s) containing each zone structure writable.
    page_start_default = reinterpret_cast<vm_address_t>(default_zone) &
        static_cast<vm_size_t>(~(getpagesize() - 1));
    len_default = reinterpret_cast<vm_address_t>(default_zone) -
        page_start_default + sizeof(ChromeMallocZone);
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ | PROT_WRITE);

    if (purgeable_zone) {
      page_start_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) &
          static_cast<vm_size_t>(~(getpagesize() - 1));
      len_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) -
          page_start_purgeable + sizeof(ChromeMallocZone);
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ | PROT_WRITE);
    }
  }

  // Default zone.

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->realloc = oom_killer_realloc;

  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }

  // Purgeable zone (if it exists).

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  if (zone_allocators_protected) {
    // Restore the original read-only protection.
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ);
    if (purgeable_zone) {
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ);
    }
  }

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it can find on the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure
  // is due to these other factors or to genuine memory or address space
  // exhaustion. In fact, because it only allocates space from the "tiny" free
  // list, a failure is unlikely to be due to memory exhaustion at all.
  // Similarly, these constraints mean that callers must always be prepared to
  // receive less memory than was requested, even when memory pressure is not
  // a concern. Finally, the only public interface to batch_malloc is
  // malloc_zone_batch_malloc, which is specific to the system's malloc
  // implementation. It's unlikely that anyone's even heard of it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch
  // failures that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);

  // === Core Foundation CFAllocators ===

  // This will not catch allocations done by custom allocators, but will catch
  // all allocations done by the system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known =
      (os_major == 10 && (os_minor == 5 || os_minor == 6));

  if (cf_allocator_internals_known) {
    ChromeCFAllocatorRef allocator = const_cast<ChromeCFAllocatorRef>(
        reinterpret_cast<const ChromeCFAllocator*>(kCFAllocatorSystemDefault));
    g_old_cfallocator_system_default = allocator->context.allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    allocator->context.allocate = oom_killer_cfallocator_system_default;

    allocator = const_cast<ChromeCFAllocatorRef>(
        reinterpret_cast<const ChromeCFAllocator*>(kCFAllocatorMalloc));
    g_old_cfallocator_malloc = allocator->context.allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    allocator->context.allocate = oom_killer_cfallocator_malloc;

    allocator = const_cast<ChromeCFAllocatorRef>(
        reinterpret_cast<const ChromeCFAllocator*>(kCFAllocatorMallocZone));
    g_old_cfallocator_malloc_zone = allocator->context.allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    allocator->context.allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
          "CFAllocator will not result in termination. http://crbug.com/45650");
  }

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
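
// Illustrative call site, an assumption about intended use rather than code
// from this file: the hooks above are installed without synchronization, so
// a process would typically call this once during early, single-threaded
// startup.
//
//   int main(int argc, char* argv[]) {
//     base::EnableTerminationOnOutOfMemory();
//     // ... run the app ...
//   }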

}  // namespace base