mem_map.cc revision 3b6f0fae76fddf81930a263a075dc87b6039b7fc
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <backtrace/backtrace.h>

#include "base/stringprintf.h"
#include "ScopedFd.h"
#include "utils.h"

#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#endif

namespace art {

#if !defined(NDEBUG)

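// Dumps each entry of a backtrace_map_info_t list as an address range, its read/execute
// permissions, and its name, one mapping per line.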
static std::ostream& operator<<(std::ostream& os, backtrace_map_info_t* rhs) {
  for (backtrace_map_info_t* m = rhs; m != NULL; m = m->next) {
    os << StringPrintf("0x%08x-0x%08x %c%c %s\n",
                       static_cast<uint32_t>(m->start),
                       static_cast<uint32_t>(m->end),
                       m->is_readable ? 'r' : '-', m->is_executable ? 'x' : '-', m->name);
  }
  return os;
}

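// Debug-only sanity check: when a fixed address is requested, verify that the requested range
// does not overlap any mapping that already exists in this process, and abort with a dump of
// the current maps if it does.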
static void CheckMapRequest(byte* addr, size_t byte_count) {
  if (addr == NULL) {
    return;
  }

  uint32_t base = reinterpret_cast<size_t>(addr);
  uint32_t limit = base + byte_count;

  backtrace_map_info_t* map_info_list = backtrace_create_map_info_list(getpid());
  for (backtrace_map_info_t* m = map_info_list; m != NULL; m = m->next) {
    CHECK(!(base >= m->start && base < m->end)     // start of new within old
        && !(limit > m->start && limit < m->end)   // end of new within old
        && !(base <= m->start && limit > m->end))  // start/end of new includes all of old
        << StringPrintf("Requested region 0x%08x-0x%08x overlaps with existing map 0x%08x-0x%08x (%s)\n",
                        base, limit,
                        static_cast<uint32_t>(m->start), static_cast<uint32_t>(m->end), m->name)
        << map_info_list;
  }
  backtrace_destroy_map_info_list(map_info_list);
}

#else
static void CheckMapRequest(byte*, size_t) { }
#endif

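// Maps an anonymous region of at least byte_count bytes (rounded up to whole pages) with the
// given protection, optionally at a requested address. When USE_ASHMEM is defined the region is
// backed by an ashmem file so it appears with a "dalvik-" prefixed name in /proc/<pid>/maps;
// otherwise a plain MAP_ANONYMOUS mapping is used. Returns nullptr and fills *error_msg on
// failure.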
MemMap* MemMap::MapAnonymous(const char* name, byte* addr, size_t byte_count, int prot,
                             std::string* error_msg) {
  if (byte_count == 0) {
    return new MemMap(name, NULL, 0, NULL, 0, prot);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  CheckMapRequest(addr, page_aligned_byte_count);

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
  int flags = MAP_PRIVATE;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  byte* actual = reinterpret_cast<byte*>(mmap(addr, page_aligned_byte_count, prot, flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, %x, %x, %d, 0) failed\n%s",
                              addr, page_aligned_byte_count, prot, flags, fd.get(),
                              maps.c_str());
    return nullptr;
  }
  return new MemMap(name, actual, byte_count, actual, page_aligned_byte_count, prot);
}

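// Maps byte_count bytes of file descriptor fd starting at file offset start, optionally at a
// requested address. The offset and length are adjusted to page boundaries as mmap requires,
// while the returned MemMap's Begin() and Size() still describe the originally requested
// window. Returns NULL and fills *error_msg on failure.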
MemMap* MemMap::MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
                                 off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
  if (byte_count == 0) {
    return new MemMap("file", NULL, 0, NULL, 0, prot);
  }
  // Adjust 'start' (the file offset) to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'addr' is modified (if specified, i.e. non-null) to be page aligned to the file but not
  // necessarily to virtual memory. mmap will page align 'addr' for us.
  byte* page_aligned_addr = (addr == NULL) ? NULL : (addr - page_offset);
  if (!reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CheckMapRequest(page_aligned_addr, page_aligned_byte_count);
  } else {
    CHECK(addr != NULL);
  }
  byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_addr,
                                              page_aligned_byte_count,
                                              prot,
                                              flags,
                                              fd,
                                              page_aligned_offset));
  if (actual == MAP_FAILED) {
    std::string strerr(strerror(errno));
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("mmap(%p, %zd, %x, %x, %d, %lld) of file '%s' failed: %s\n%s",
                              page_aligned_addr, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename, strerr.c_str(),
                              maps.c_str());
    return NULL;
  }
  return new MemMap("file", actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot);
}

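// Unmaps the underlying region, if any. A zero-length MemMap owns no mapping and is destroyed
// without any munmap call.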
MemMap::~MemMap() {
  if (base_begin_ == NULL && base_size_ == 0) {
    return;
  }
  int result = munmap(base_begin_, base_size_);
  if (result == -1) {
    PLOG(FATAL) << "munmap failed";
  }
}

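// Takes ownership of an already established mapping and checks the invariants: an empty map has
// null pointers and zero sizes, a non-empty map has non-null pointers and a non-zero base size.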
MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
               size_t base_size, int prot)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot) {
  if (size_ == 0) {
    CHECK(begin_ == NULL);
    CHECK(base_begin_ == NULL);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != NULL);
    CHECK(base_begin_ != NULL);
    CHECK_NE(base_size_, 0U);
  }
}

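// Shrinks this mapping so that it ends at new_end and returns a new MemMap owning the tail
// [new_end, old end). The tail pages are unmapped and immediately remapped (ashmem-backed when
// USE_ASHMEM is defined) with tail_prot; returns nullptr and fills *error_msg on failure.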
MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(base_begin_));
  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
  DCHECK(IsAligned<kPageSize>(new_end));
  byte* old_end = begin_ + size_;
  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
  byte* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, NULL, 0, NULL, 0, tail_prot);
  }
  size_ = new_end - reinterpret_cast<byte*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  byte* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK(IsAligned<kPageSize>(tail_base_size));

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
                              tail_base_begin, tail_base_size, name_.c_str(),
                              maps.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads from trying to take this memory region here.
  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, %x, %x, %d, 0) failed\n%s",
                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
                              maps.c_str());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
}

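// Changes the protection of the whole underlying mapping to prot via mprotect. Returns true on
// success (including the empty map, which has nothing to protect) and false otherwise.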
bool MemMap::Protect(int prot) {
  if (base_begin_ == NULL && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

}  // namespace art