// mem_map.cc revision 9de65ff3a9c49b91d80be292020f012f3d0a24ef
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mem_map.h"
18
19#include <inttypes.h>
20#include <backtrace/BacktraceMap.h>
21
22#include "UniquePtr.h"
23#include "base/stringprintf.h"
24#include "ScopedFd.h"
25#include "utils.h"
26
27#define USE_ASHMEM 1
28
29#ifdef USE_ASHMEM
30#include <cutils/ashmem.h>
31#endif
32
33namespace art {
34
35static std::ostream& operator<<(
36    std::ostream& os,
37    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
38  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
39    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
40                       static_cast<uint32_t>(it->start),
41                       static_cast<uint32_t>(it->end),
42                       (it->flags & PROT_READ) ? 'r' : '-',
43                       (it->flags & PROT_WRITE) ? 'w' : '-',
44                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
45  }
46  return os;
47}
48
#if defined(__LP64__) && !defined(__x86_64__)
// Where to start with low memory allocation. Starting two pages up keeps the
// scan away from the null page.
static constexpr uintptr_t LOW_MEM_START = kPageSize * 2;

// Cursor for the linear low-memory scan in MapAnonymous; advanced past each
// probed region so later requests do not rescan from the bottom every time.
uintptr_t MemMap::next_mem_pos_ = LOW_MEM_START;   // first page to check for low-mem extent
#endif
55
56static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
57                            std::ostringstream* error_msg) {
58  // Handled first by caller for more specific error messages.
59  CHECK(actual_ptr != MAP_FAILED);
60
61  if (expected_ptr == nullptr) {
62    return true;
63  }
64
65  if (expected_ptr == actual_ptr) {
66    return true;
67  }
68
69  // We asked for an address but didn't get what we wanted, all paths below here should fail.
70  int result = munmap(actual_ptr, byte_count);
71  if (result == -1) {
72    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
73  }
74
75  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
76  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
77  uintptr_t limit = expected + byte_count;
78
79  UniquePtr<BacktraceMap> map(BacktraceMap::Create(getpid()));
80  if (!map->Build()) {
81    *error_msg << StringPrintf("Failed to build process map to determine why mmap returned "
82                               "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
83
84    return false;
85  }
86  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
87    if ((expected >= it->start && expected < it->end)  // start of new within old
88        || (limit > it->start && limit < it->end)      // end of new within old
89        || (expected <= it->start && limit > it->end)) {  // start/end of new includes all of old
90      *error_msg
91          << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
92                          "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
93                          expected, limit,
94                          static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
95                          it->name.c_str())
96          << std::make_pair(it, map->end());
97      return false;
98    }
99  }
100  *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
101                             "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
102  return false;
103}
104
// Maps an anonymous region of RoundUp(byte_count, kPageSize) bytes with the
// given protection. With USE_ASHMEM the region is backed by an ashmem fd so
// it appears as "dalvik-<name>" in /proc/<pid>/maps. If |expected| is
// non-null the mapping must land exactly there. On 64-bit, non-x86_64
// targets, |low_4gb| triggers a linear scan for free space below 4GB.
// Returns a new MemMap on success, or nullptr with |*error_msg| set.
MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
                             bool low_4gb, std::string* error_msg) {
  if (byte_count == 0) {
    // Zero-length request: return an empty map without calling mmap at all.
    return new MemMap(name, nullptr, 0, nullptr, 0, prot);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
    return nullptr;
  }
  int flags = MAP_PRIVATE;
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if defined(__LP64__) && !defined(__x86_64__)
  // MAP_32BIT only available on x86_64.
  void* actual = MAP_FAILED;
  std::string strerr;
  if (low_4gb && expected == nullptr) {
    flags |= MAP_FIXED;

    bool first_run = true;

    // Linear scan of [next_mem_pos_, 4GB) for a free run of pages, wrapping
    // around to LOW_MEM_START at most once if the top of the range is hit.
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try another time from the bottom;
          ptr = LOW_MEM_START - kPageSize;  // minus kPageSize: the loop increment adds it back.
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free.
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        // msync() succeeds only on a mapped page; ENOMEM means the page is
        // unmapped and therefore free for us to claim with MAP_FIXED.
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // update early, as we break out when we found and mapped a region

      if (safe == true) {
        // Note: another thread could map this range between the msync probe
        // and this MAP_FIXED mmap; the scheme is best-effort.
        actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
                      0);
        if (actual != MAP_FAILED) {
          break;
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      strerr = "Could not find contiguous low-memory space.";
    }
  } else {
    actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
    strerr = strerror(errno);
  }

#else
#ifdef __x86_64__
  if (low_4gb) {
    // On x86_64 the kernel can do the below-4GB placement for us.
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
  std::string strerr(strerror(errno));
#endif

  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
                              expected, page_aligned_byte_count, prot, flags, fd.get(),
                              strerr.c_str(), maps.c_str());
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
    *error_msg = check_map_request_error_msg.str();
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot);
}
217
// Maps |byte_count| bytes of file |fd| starting at file offset |start| with
// the given protection and flags. |start| need not be page aligned: the
// actual mmap covers the enclosing page-aligned range and the returned map's
// begin points |page_offset| bytes into it. If |expected| is non-null the
// mapping must land there; with |reuse| the caller asserts it already owns a
// reservation over that range, making MAP_FIXED safe. Returns nullptr and
// sets |*error_msg| on failure.
MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
                                 off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected != nullptr);
    flags |= MAP_FIXED;
  } else {
    // Without a caller-owned reservation, MAP_FIXED would clobber arbitrary
    // existing mappings and must not be passed in.
    CHECK_EQ(0, flags & MAP_FIXED);
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected' is modified (if specified, ie non-null) to be page aligned to the file but not
  // necessarily to virtual memory. mmap will page align 'expected' for us.
  byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);

  byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
                                              page_aligned_byte_count,
                                              prot,
                                              flags,
                                              fd,
                                              page_aligned_offset));
  // Capture errno's text immediately, before any later call can clobber it.
  std::string strerr(strerror(errno));
  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s\n%s",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename, strerr.c_str(),
                              maps.c_str());
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
    *error_msg = check_map_request_error_msg.str();
    return nullptr;
  }
  // Expose [expected, expected + byte_count) to the caller while tracking the
  // full page-aligned range for the eventual munmap.
  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot);
}
269
270MemMap::~MemMap() {
271  if (base_begin_ == nullptr && base_size_ == 0) {
272    return;
273  }
274  int result = munmap(base_begin_, base_size_);
275  if (result == -1) {
276    PLOG(FATAL) << "munmap failed";
277  }
278}
279
280MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
281               size_t base_size, int prot)
282    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
283      prot_(prot) {
284  if (size_ == 0) {
285    CHECK(begin_ == nullptr);
286    CHECK(base_begin_ == nullptr);
287    CHECK_EQ(base_size_, 0U);
288  } else {
289    CHECK(begin_ != nullptr);
290    CHECK(base_begin_ != nullptr);
291    CHECK_NE(base_size_, 0U);
292  }
293};
294
// Splits this map at |new_end|: this map is shrunk to end at |new_end| and
// the tail [new_end, old end) is unmapped and immediately re-mapped as a
// fresh anonymous (ashmem-backed when USE_ASHMEM) region with |tail_prot|,
// returned as a new MemMap named |tail_name|. |new_end| must be page aligned
// and within [Begin(), End()]. Returns nullptr and sets |*error_msg| on
// failure.
MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(base_begin_));
  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
  DCHECK(IsAligned<kPageSize>(new_end));
  byte* old_end = begin_ + size_;
  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
  byte* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    // Splitting at the very end: nothing to carve off, return an empty map.
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
  }
  // Shrink this map's bookkeeping before touching the tail pages.
  size_ = new_end - reinterpret_cast<byte*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  byte* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK(IsAligned<kPageSize>(tail_base_size));

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  // MAP_FIXED is safe here: we owned the tail range and are re-mapping it in place.
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
                              tail_base_begin, tail_base_size, name_.c_str(),
                              maps.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads to try to take this memory region here.
  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed\n%s",
                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
                              maps.c_str());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
}
363
364bool MemMap::Protect(int prot) {
365  if (base_begin_ == nullptr && base_size_ == 0) {
366    prot_ = prot;
367    return true;
368  }
369
370  if (mprotect(base_begin_, base_size_, prot) == 0) {
371    prot_ = prot;
372    return true;
373  }
374
375  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
376              << prot << ") failed";
377  return false;
378}
379
380std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
381  os << StringPrintf("[MemMap: %s prot=0x%x %p-%p]",
382                     mem_map.GetName().c_str(), mem_map.GetProtect(),
383                     mem_map.BaseBegin(), mem_map.BaseEnd());
384  return os;
385}
386
387}  // namespace art
388