mem_map.cc revision 7ec0904eac1d799d3443b4c5e83545b72eae9ad3
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include "base/memory_tool.h"
#include <backtrace/BacktraceMap.h>
#include <inttypes.h>
#include <stdlib.h>

#include <memory>
#include <sstream>

#include "base/stringprintf.h"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
#pragma GCC diagnostic pop

#include "thread-inl.h"
#include "utils.h"

#include <cutils/ashmem.h>

#ifndef ANDROID_OS
#include <sys/resource.h>
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

MemMap::Maps* MemMap::maps_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map requests in the low 32-bit address range for 64-bit architectures that do not
// support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate a random starting position.
// To not interfere with the image position, take the image's address and only place below it.
// Current formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) = ~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// ----------------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// ----------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.

// The function is standalone so that it can be tested in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below the highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // The lowest bits (usually 12) are not used, as allocations are page-aligned.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask the input data.
  return (input & mask) + LOW_MEM_START;
}
#endif
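
// Worked example for CreateStartPos (hypothetical values, not taken from an actual build
// configuration): with ART_BASE_ADDRESS == 0x70000000 and kPageSize == 4096,
// CLZ(0x70000000) == 1, so mask_ones == (1 << 30) - 1 == 0x3fffffff and mask == 0x3ffff000.
// For the random input 0x123456789abcdef0, only the low word matters:
// (0x9abcdef0 & 0x3ffff000) + LOW_MEM_START == 0x1abcd000 + 0x10000 == 0x1abdd000,
// a page-aligned start below the (hypothetical) image base.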

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single memory map by either reading
// the maps_ variable or the /proc/self/maps entry.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

  // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
  // further.
  {
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (auto& pair : *maps_) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map == nullptr) {
    if (error_msg != nullptr) {
      *error_msg = StringPrintf("Failed to build process map");
    }
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)  // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  if (error_msg != nullptr) {
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                              "any existing map. See process maps in the log.", begin, end);
  }
  return false;
}
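
// Illustration (hypothetical addresses): the interval checks in ContainedWithinExistingMap are
// half-open. Given an existing map covering [0x1000, 0x5000), requests for [0x2000, 0x3000) and
// [0x4000, 0x5000) are contained, while [0x4000, 0x6000) is not, as its end lies past the map's
// end.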

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end < it->end)        // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest validates a non-MAP_FAILED mmap result against the
// expected value, calling munmap and giving the reason in error_msg
// if validation fails.
//
// If expected_ptr is null, nothing is checked beyond the fact that
// actual_ptr is not MAP_FAILED. However, if expected_ptr is non-null,
// we check that actual_ptr == expected_ptr and, if not, report in
// error_msg the conflicting mapping if one was found, or a generic
// error otherwise.
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by the caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  if (error_msg != nullptr) {
    // We call this here so that we can try to generate a full error
    // message with the overlapping mapping. There's no guarantee
    // that there will be an overlap, though, since
    // - the kernel is not *required* to honor expected_ptr unless MAP_FIXED is
    //   used, even if there is no overlap, and
    // - there might have been an overlap at the point of mmap, but the
    //   overlapping region has since been unmapped.
    std::string error_detail;
    CheckNonOverlapping(expected, limit, &error_detail);
    std::ostringstream os;
    os << StringPrintf("Failed to mmap at expected address, mapped at "
                       "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                       actual, expected);
    if (!error_detail.empty()) {
      os << " : " << error_detail;
    }
    *error_msg = os.str();
  }
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr,
                                    size_t page_aligned_byte_count,
                                    int prot,
                                    int flags,
                                    int fd,
                                    off_t offset) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif
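
// Boundary illustration for TryMemMapLow4GB (hypothetical numbers): a two-page (0x2000-byte)
// request answered at 0xffffd000 ends at 0xfffff000 and is kept, while one answered at
// 0xffffe000 ends exactly at the 4 GB line (0x100000000), so the >= check above unmaps it and
// returns MAP_FAILED.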

MemMap* MemMap::MapAnonymous(const char* name,
                             uint8_t* expected_ptr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             std::string* error_msg,
                             bool use_ashmem) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  }

  ScopedFd fd(-1);

  if (use_ashmem) {
    if (!kIsTargetBuild) {
      // When not on Android, ashmem is faked using files in /tmp. Ensure that such files won't
      // fail due to ulimit restrictions. If they would, use a regular mmap.
      struct rlimit rlimit_fsize;
      CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
      use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
        (page_aligned_byte_count < rlimit_fsize.rlim_cur);
    }
  }

  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      if (error_msg != nullptr) {
        *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                  name, strerror(errno));
      }
      return nullptr;
    }
    flags &= ~MAP_ANONYMOUS;
  }

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

  void* actual = MapInternal(expected_ptr,
                             page_aligned_byte_count,
                             prot,
                             flags,
                             fd.get(),
                             0,
                             low_4gb);
  saved_errno = errno;

  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

      *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                "See process maps in the log.",
                                expected_ptr,
                                page_aligned_byte_count,
                                prot,
                                flags,
                                fd.get(),
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}
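
// Usage sketch for MapAnonymous (hypothetical caller, not part of the original file):
//
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("my-region",
//                                                    /* expected_ptr */ nullptr,
//                                                    16 * KB,
//                                                    PROT_READ | PROT_WRITE,
//                                                    /* low_4gb */ false,
//                                                    /* reuse */ false,
//                                                    &error_msg,
//                                                    /* use_ashmem */ true));
//   CHECK(map != nullptr) << error_msg;
//
// The 16 KB request is rounded up to whole pages; Begin()/Size() describe the requested bytes
// while BaseBegin()/BaseSize() cover the full mapping.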

MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 bool reuse,
                                 const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'start' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the file
  // but not necessarily to virtual memory. mmap will page-align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
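
  // Worked example (hypothetical numbers): with kPageSize == 0x1000, mapping byte_count == 0x800
  // bytes at file offset start == 0x1234 gives page_offset == 0x234, page_aligned_offset ==
  // 0x1000 and page_aligned_byte_count == RoundUp(0x800 + 0x234, 0x1000) == 0x1000; the
  // caller-visible Begin() then points 0x234 bytes into the mapping.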

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
                                                           page_aligned_byte_count,
                                                           prot,
                                                           flags,
                                                           fd,
                                                           page_aligned_offset,
                                                           low_4gb));
  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      auto saved_errno = errno;

      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                                ") of file '%s' failed: %s. See process maps in the log.",
                                page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                                static_cast<int64_t>(page_aligned_offset), filename,
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t* real_start = actual + page_offset;
    const uint8_t* real_end = actual + page_offset + byte_count;
    const uint8_t* mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    DCHECK(maps_ != nullptr);
    maps_->insert(std::make_pair(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg, bool use_ashmem) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

  int int_fd = -1;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += tail_name;
    int_fd = ashmem_create_region(debug_friendly_name.c_str(), tail_base_size);
    flags = MAP_PRIVATE | MAP_FIXED;
    if (int_fd == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                tail_name, strerror(errno));
      return nullptr;
    }
  }
  ScopedFd fd(int_fd);

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect, as there's no way to prevent
  // other threads from trying to take this memory region here.
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
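
// Usage sketch for RemapAtEnd (hypothetical caller, not part of the original file): splitting a
// 4-page map at its midpoint shrinks the original in place and returns the upper half as a new,
// independently protected map:
//
//   std::string error_msg;
//   uint8_t* split = map->Begin() + 2 * kPageSize;
//   std::unique_ptr<MemMap> tail(
//       map->RemapAtEnd(split, "tail", PROT_READ, &error_msg, /* use_ashmem */ true));
//   CHECK(tail != nullptr) << error_msg;
//   // map now covers two pages; tail covers the remaining two.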

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Sync() {
  bool result;
  if (redzone_size_ != 0) {
    // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing
    // it to msync() as it only accepts page-aligned base address, and exclude the higher-end
    // noaccess protection from the msync range. b/27552451.
    uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
    MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin);
    result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0;
    MEMORY_TOOL_MAKE_NOACCESS(base_begin, begin_ - base_begin);
  } else {
    result = msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
  }
  return result;
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}
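
// Illustration (hypothetical layout): for maps A = [0x1000, 0x2000), B = [0x2000, 0x4000) and
// C = [0x4000, 0x5000), CheckNoGaps(A, C) walks A -> B -> C via BaseEnd() and returns true; if B
// were absent, the lookup at 0x2000 would find no map and CheckNoGaps would return false.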

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *maps_;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
        it->second->GetProtect() == map->GetProtect() &&
        it->second->GetName() == map->GetName() &&
        (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(address), end = maps_->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  if (maps_ == nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    maps_ = new Maps;
  }
}

void MemMap::Shutdown() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  delete maps_;
  maps_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                              new_size),
      base_size_ - new_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}
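
// Shrink illustration for SetSize (hypothetical sizes): calling SetSize(2 * kPageSize) on a
// 4-page map munmaps the top two pages and leaves both size_ and base_size_ at two pages;
// growing a map this way is not supported, as the CHECK_LE in SetSize enforces.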

void* MemMap::MapInternal(void* addr,
                          size_t length,
                          int prot,
                          int flags,
                          int fd,
                          off_t offset,
                          bool low_4gb) {
#ifdef __LP64__
  // When requesting low_4gb memory with an expected address, the requested range must fit
  // entirely below 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
    LOG(ERROR) << "The requested address space (" << addr << ", "
               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
               << ") cannot fit in low_4gb";
    return MAP_FAILED;
  }
#else
  UNUSED(low_4gb);
#endif
  DCHECK_ALIGNED(length, kPageSize);
  if (low_4gb) {
    DCHECK_EQ(flags & MAP_FIXED, 0);
  }
  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  void* actual = MAP_FAILED;
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT is only available on x86_64.
  if (low_4gb && addr == nullptr) {
    bool first_run = true;

    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      // Use maps_ as an optimization to skip over large maps.
      // Find the first map whose address is > ptr.
      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
      if (it != maps_->begin()) {
        auto before_it = it;
        --before_it;
        // Start at the end of the map before the upper bound.
        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
        CHECK_ALIGNED(ptr, kPageSize);
      }
      while (it != maps_->end()) {
        // How much space do we have until the next map?
        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
        // If the space may be sufficient, break out of the loop.
        if (delta >= length) {
          break;
        }
        // Otherwise, skip to the end of the map.
        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
        CHECK_ALIGNED(ptr, kPageSize);
        ++it;
      }

      // Try to see if we get lucky with this address since none of the ART maps overlap.
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
      if (actual != MAP_FAILED) {
        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
        return actual;
      }

      if (4U * GB - ptr < length) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try one more time from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // The second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check that the pages are free: msync fails with ENOMEM on unmapped pages, so a
      // successful call means something is already mapped there.
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      // Update early, as we break out of the loop once we have found and mapped a region.
      next_mem_pos_ = tail_ptr;

      if (safe) {
        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
        if (actual != MAP_FAILED) {
          return actual;
        }
      } else {
        // Skip over the last probed page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      errno = ENOMEM;
    }
  } else {
    actual = mmap(addr, length, prot, flags, fd, offset);
  }

#else
#if defined(__LP64__)
  if (low_4gb && addr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif
  actual = mmap(addr, length, prot, flags, fd, offset);
#endif
  return actual;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

void MemMap::TryReadable() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  CHECK_NE(prot_ & PROT_READ, 0);
  volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
  volatile uint8_t* end = begin + base_size_;
  DCHECK(IsAligned<kPageSize>(begin));
  DCHECK(IsAligned<kPageSize>(end));
  // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away
  // the reads.
  for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
    // This read could fault if protection wasn't set correctly.
    uint8_t value = *ptr;
    UNUSED(value);
  }
}

}  // namespace art