/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <map>
#include <utility>

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "ScopedSignalHandler.h"
#include "log.h"

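// Records the allocation [begin, end) in the ordered allocation map.  A
// zero-length allocation is widened to one byte so it still occupies a
// distinct key.  Returns false if the range was already recorded; a partial
// overlap with a different existing range is also logged as an error.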
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  if (end == begin) {
    end = begin + 1;
  }
  Range range{begin, end};
  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
  if (inserted.second) {
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += range.size();
    return true;
  } else {
    Range overlap = inserted.first->first;
    if (overlap != range) {
      ALOGE("range %p-%p overlaps with existing range %p-%p",
          reinterpret_cast<void*>(begin),
          reinterpret_cast<void*>(end),
          reinterpret_cast<void*>(overlap.begin),
          reinterpret_cast<void*>(overlap.end));
    }
    return false;
  }
}

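// Reads the word at word_ptr and reports whether its value points into a
// recorded allocation, returning the matching range and info if so.
// walking_ptr_ brackets the read so HandleSegFault can tell that a SIGSEGV
// came from this dereference and recover instead of crashing.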
bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range, AllocationInfo** info) {
  walking_ptr_ = word_ptr;
  // This access may segfault if the process under test has done something strange,
  // for example mprotect(PROT_NONE) on a native heap page.  If so, it will be
  // caught and handled by mmapping a zero page over the faulting page.
  uintptr_t value = *reinterpret_cast<uintptr_t*>(word_ptr);
  walking_ptr_ = 0;
  if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
    AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
    if (it != allocations_.end()) {
      *range = it->first;
      *info = &it->second;
      return true;
    }
  }
  return false;
}

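// Marks every allocation transitively reachable from root.  Despite the
// name, the traversal is iterative over an explicit worklist, so deeply
// nested reference chains cannot overflow the call stack.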
void HeapWalker::RecurseRoot(const Range& root) {
  allocator::vector<Range> to_do(1, root, allocator_);
  while (!to_do.empty()) {
    Range range = to_do.back();
    to_do.pop_back();

    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
      if (!ref_info->referenced_from_root) {
        ref_info->referenced_from_root = true;
        to_do.push_back(ref_range);
      }
    });
  }
}

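// Registers a memory range to be scanned for pointers when DetectLeaks()
// runs.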
void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
  roots_.push_back(Range{begin, end});
}

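// Registers individual values (such as captured register contents) to be
// treated as potential pointers during the leak scan.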
void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
  root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}

size_t HeapWalker::Allocations() {
  return allocations_.size();
}

size_t HeapWalker::AllocationBytes() {
  return allocation_bytes_;
}

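// Scans all registered roots, marking each allocation reachable from them.
// The collected root values are scanned as one contiguous range.  Call
// Leaked() afterwards to enumerate the allocations left unmarked.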
bool HeapWalker::DetectLeaks() {
  // Recursively walk pointers from roots to mark referenced allocations
  for (auto it = roots_.begin(); it != roots_.end(); it++) {
    RecurseRoot(*it);
  }

  Range vals;
  vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
  vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);

  RecurseRoot(vals);

  return true;
}

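// Copies up to limit unreferenced allocations into leaked and reports the
// total count and byte size of all leaks through the optional
// out-parameters.  Totals are computed in a separate pass so they remain
// accurate even when limit truncates the returned list.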
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
    size_t* num_leaks_out, size_t* leak_bytes_out) {
  leaked.clear();

  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      num_leaks++;
      leak_bytes += it->first.end - it->first.begin;
    }
  }

  size_t n = 0;
  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
    if (!it->second.referenced_from_root) {
      if (n++ < limit) {
        leaked.push_back(it->first);
      }
    }
  }

  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }

  return true;
}

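// Maps a fresh zero-filled anonymous page over the page containing addr so
// that a faulting read can be retried safely.  MAP_FIXED makes the new
// mapping replace whatever was previously mapped at that address.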
static bool MapOverPage(void* addr) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));

  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  if (ret == MAP_FAILED) {
    ALOGE("failed to map page at %p: %s", page, strerror(errno));
    return false;
  }

  return true;
}

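// SIGSEGV handler active while walking the heap.  A fault at the address
// currently being dereferenced (walking_ptr_) is recovered by mapping a
// zero page over the faulting page; any other fault resets the handler so
// the default disposition takes effect when the instruction re-executes.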
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si, void* /*uctx*/) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
  if (addr != walking_ptr_) {
    handler.reset();
    return;
  }
  ALOGW("failed to read page at %p, signal %d", si->si_addr, signal);
  if (!MapOverPage(si->si_addr)) {
    handler.reset();
  }
}

ScopedSignalHandler::SignalFn ScopedSignalHandler::handler_;

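// A minimal usage sketch for reference, not taken from this file.  The
// exact constructor and allocator types come from HeapWalker.h and
// Allocator.h; "heap", "stack_begin"/"stack_end", and the allocation
// addresses below are hypothetical placeholders:
//
//   Heap heap;
//   HeapWalker walker{Allocator<HeapWalker>(heap)};
//   walker.Allocation(alloc_begin, alloc_end);    // each live allocation
//   walker.Root(stack_begin, stack_end);          // each root memory range
//   walker.DetectLeaks();
//   allocator::vector<Range> leaked{Allocator<Range>(heap)};
//   size_t num_leaks = 0;
//   size_t leak_bytes = 0;
//   walker.Leaked(leaked, 100, &num_leaks, &leak_bytes);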