mem_map.cc revision 27a10f618357cf85cc0677a04f0a5a3a8a437aed
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"
#include "thread-inl.h"

#include <inttypes.h>
#include <backtrace/BacktraceMap.h>
#include <memory>

// See CreateStartPos below.
#ifdef __BIONIC__
#include <sys/auxv.h>
#endif

#include "base/stringprintf.h"
#include "ScopedFd.h"
#include "utils.h"

#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

std::multimap<void*, MemMap*> MemMap::maps_;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// getauxval as an entropy source is exposed in Bionic, but not in glibc before 2.16. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);
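  // Worked example (illustrative only; assumes ART_BASE_ADDRESS == 0x70000000 and 4KB pages):
  //   leading_zeros = 1, mask_ones = 0x3fffffff, mask = 0x3ffff000,
  //   so the result is (input & 0x3ffff000) + LOW_MEM_START, i.e. a page-aligned address above
  //   the SELinux-protected low 64KB and below the image base.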

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint8_t* random_data = reinterpret_cast<uint8_t*>(getauxval(AT_RANDOM));
  // The lower 8B are taken for the stack guard. Use the upper 8B (with mask).
  return CreateStartPos(*reinterpret_cast<uintptr_t*>(random_data + 8));
#else
  // No auxv on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

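// Return true if the kernel honored the request: either no specific address was asked for, or
// mmap returned exactly 'expected_ptr'. Otherwise unmap the stray region and write a diagnostic,
// including any overlapping entry from the process map, to *error_msg.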
static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::ostringstream* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg << StringPrintf("Failed to create process map to determine why mmap returned "
                               "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);

    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((expected >= it->start && expected < it->end)  // start of new within old
        || (limit > it->start && limit < it->end)      // end of new within old
        || (expected <= it->start && limit > it->end)) {  // start/end of new includes all of old
      *error_msg
          << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                          "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
                          expected, limit,
                          static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                          it->name.c_str())
          << std::make_pair(it, map->end());
      return false;
    }
  }
  *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
                             "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
  return false;
}

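// Map an anonymous region of 'byte_count' bytes (rounded up to whole pages) with protection
// 'prot'. Where ashmem is available the region is backed by an ashmem fd named "dalvik-<name>"
// so it can be identified in /proc/<pid>/maps. If 'expected' is non-null the mapping must land
// exactly there; if 'low_4gb' is set on 64-bit targets the mapping is placed below 4GB. Returns
// nullptr and sets *error_msg on failure.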
MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
                             bool low_4gb, std::string* error_msg) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  ScopedFd fd(-1);

#ifdef USE_ASHMEM
#ifdef HAVE_ANDROID_OS
  const bool use_ashmem = true;
#else
  // When not on Android, ashmem is faked using files in /tmp. Ensure that such files won't
  // fail due to ulimit restrictions. If they would, fall back to a regular mmap.
  struct rlimit rlimit_fsize;
  CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
  const bool use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
      (page_aligned_byte_count < rlimit_fsize.rlim_cur);
#endif
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
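    // The region is now backed by the ashmem fd, so drop MAP_ANONYMOUS and map it as a plain
    // private file mapping.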
    flags = MAP_PRIVATE;
  }
#endif

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

#ifdef __LP64__
  // When requesting low_4gb memory with an expected address, the requested range must fit
  // entirely below 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
                              expected, expected + page_aligned_byte_count);
    return nullptr;
  }
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT is only available on x86_64.
  void* actual = MAP_FAILED;
  if (low_4gb && expected == nullptr) {
    bool first_run = true;

    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try again from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free.
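      // Each page is probed with msync: a return of 0 means the page is already mapped, while a
      // failure with ENOMEM means it is unmapped and therefore available.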
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // Update early, as we break out once we have found and mapped a region.

      if (safe) {
        actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
                      0);
        if (actual != MAP_FAILED) {
          // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
          // 4GB. If this is the case, unmap and retry.
          if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count < 4 * GB) {
            break;
          } else {
            munmap(actual, page_aligned_byte_count);
            actual = MAP_FAILED;
          }
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      saved_errno = ENOMEM;
    }
  } else {
    actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
    saved_errno = errno;
  }

#else
#if defined(__LP64__)
  if (low_4gb && expected == nullptr) {
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
  saved_errno = errno;
#endif

  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
                              expected, page_aligned_byte_count, prot, flags, fd.get(),
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
    *error_msg = check_map_request_error_msg.str();
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot);
}

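// Map 'byte_count' bytes of the file 'fd' starting at file offset 'start' with the given
// protection and flags. The offset is rounded down to a page boundary and the difference is
// folded into the returned map, so its Begin() points at the requested data. If 'reuse' is set,
// the caller guarantees that 'expected' lies inside a page reservation it already owns, and the
// mapping is forced there with MAP_FIXED.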
MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
                                 off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected != nullptr);
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
  }
  // Adjust 'start' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected' is modified (if specified, i.e. non-null) to be page-aligned to the file but
  // not necessarily to virtual memory. mmap will page-align 'expected' for us.
  byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);

  byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
                                              page_aligned_byte_count,
                                              prot,
                                              flags,
                                              fd,
                                              page_aligned_offset));
  if (actual == MAP_FAILED) {
    auto saved_errno = errno;

    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s\n%s",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename,
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
    *error_msg = check_map_request_error_msg.str();
    return nullptr;
  }
  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  int result = munmap(base_begin_, base_size_);
  if (result == -1) {
    PLOG(FATAL) << "munmap failed";
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  for (auto it = maps_.lower_bound(base_begin_), end = maps_.end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_.erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
               size_t base_size, int prot)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    maps_.insert(std::pair<void*, MemMap*>(base_begin_, this));
  }
}

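// Shrink this mapping so that it ends at 'new_end' and return a new MemMap that owns the tail
// [new_end, old end), remapped with protection 'tail_prot'. If 'new_end' already equals the end
// of the mapping, an empty map is returned.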
MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(base_begin_));
  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
  DCHECK(IsAligned<kPageSize>(new_end));
  byte* old_end = begin_ + size_;
  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
  byte* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
  }
  size_ = new_end - reinterpret_cast<byte*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  byte* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK(IsAligned<kPageSize>(tail_base_size));

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
                              tail_base_begin, tail_base_size, name_.c_str(),
                              maps.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect, as there's no way to prevent
  // other threads from taking this memory region here.
  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed\n%s",
                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
                              maps.c_str());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
}

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
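    // If madvise(MADV_DONTNEED) does not guarantee zero-filled pages on the next fault
    // (kMadviseZeroes is false), clear the region explicitly before advising.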
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os) {
  DumpMaps(os, maps_);
}

void MemMap::DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, mem_maps);
}

void MemMap::DumpMapsLocked(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
  os << mem_maps;
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_.lower_bound(base_begin), end = maps_.end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  for (auto it = maps_.lower_bound(address), end = maps_.end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

}  // namespace art