mem_map.cc revision 9bdf108885a27ba05fae8501725649574d7c491b
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include "base/memory_tool.h"
#include <backtrace/BacktraceMap.h>
#include <inttypes.h>
#include <stdlib.h>

#include <memory>
#include <sstream>

#include "base/stringprintf.h"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
#pragma GCC diagnostic pop

#include "thread-inl.h"
#include "utils.h"

#include <cutils/ashmem.h>

#ifndef ANDROID_OS
#include <sys/resource.h>
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

MemMap::Maps* MemMap::maps_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handle mem_map requests in the low 32-bit address range for 64-bit architectures that do not
// support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate a random starting position.
// To not interfere with the image position, take the image's address and only place the mapping
// below it. Current formula (sketch):
//
//   ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
//   ----------------------------------------
//                      = 0000111111111111111
//   & ~(kPageSize - 1) =~0000000000000001111
//   ----------------------------------------
//   mask               = 0000111111111110000
//   & random data      = YYYYYYYYYYYYYYYYYYY
//   ----------------------------------------
//   tmp                = 0000YYYYYYYYYYY0000
//   + LOW_MEM_START    = 0000000000001000000
//   ----------------------------------------
//   start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below the highest bit in ART_BASE_ADDRESS.
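  // Worked example (illustrative; assumes ART_BASE_ADDRESS == 0x70000000 and kPageSize == 4096):
  // CLZ(0x70000000) == 1, so mask_ones == (1 << 30) - 1 == 0x3fffffff and
  // mask == 0x3fffffff & ~0xfff == 0x3ffff000. The result is then a page-aligned address in
  // [LOW_MEM_START, 0x3ffff000 + LOW_MEM_START], always below the image base.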
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // The lowest (usually 12) bits are not used, as the result is aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask the input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize the linear scan to a random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single memory map, consulting either the
// maps_ variable or the /proc/self/maps entries.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

  // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
  // further.
  {
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (auto& pair : *maps_) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map == nullptr) {
    if (error_msg != nullptr) {
      *error_msg = StringPrintf("Failed to build process map");
    }
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)    // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  if (error_msg != nullptr) {
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                              "any existing map. See process maps in the log.", begin, end);
  }
  return false;
}
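
// Usage sketch (illustrative): callers in this file use this to validate 'reuse' requests
// against a reservation made earlier, e.g.
//   std::string error_msg;
//   DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, &error_msg)) << error_msg;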

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end < it->end)        // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}
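
// The three overlap predicates above, pictured against an existing map (illustrative):
//   old:          |=======|
//   new:              |-------|   begin of new falls inside old
//   new:      |-------|           end of new falls inside old
//   new:        |============|    new covers all of old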

// CheckMapRequest validates a non-MAP_FAILED mmap result against the expected address, calling
// munmap and reporting the reason in error_msg if validation fails.
//
// If expected_ptr is null, nothing is checked beyond the fact that actual_ptr is not MAP_FAILED.
// However, if expected_ptr is non-null, we check that actual_ptr == expected_ptr, and if not,
// report in error_msg the conflicting mapping if one is found, or a generic error otherwise.
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by the caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  // We call this here so that we can try to generate a full error
  // message with the overlapping mapping. There is no guarantee that
  // there will be an overlap, though, since
  // - the kernel is not *required* to honor expected_ptr unless MAP_FIXED is
  //   set, even if there is no overlap, and
  // - there might have been an overlap at the point of mmap, but the
  //   overlapping region has since been unmapped.
  std::string error_detail;
  CheckNonOverlapping(expected, limit, &error_detail);

  if (error_msg != nullptr) {
    std::ostringstream os;
    os << StringPrintf("Failed to mmap at expected address, mapped at "
                       "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                       actual, expected);
    if (!error_detail.empty()) {
      os << " : " << error_detail;
    }
    *error_msg = os.str();
  }
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr,
                                    size_t page_aligned_byte_count,
                                    int prot,
                                    int flags,
                                    int fd,
                                    off_t offset) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED, the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif
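
// Usage sketch for MapAnonymous (illustrative; the name and size are hypothetical):
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("my-space",
//                                                    /* expected_ptr */ nullptr,
//                                                    16 * kPageSize,
//                                                    PROT_READ | PROT_WRITE,
//                                                    /* low_4gb */ false,
//                                                    /* reuse */ false,
//                                                    &error_msg,
//                                                    /* use_ashmem */ true));
//   CHECK(map != nullptr) << error_msg;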

MemMap* MemMap::MapAnonymous(const char* name,
                             uint8_t* expected_ptr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             std::string* error_msg,
                             bool use_ashmem) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  }

  ScopedFd fd(-1);

  if (use_ashmem) {
    if (!kIsTargetBuild) {
      // When not on Android, ashmem is faked using files in /tmp. Ensure that such files won't
      // fail due to ulimit restrictions. If they would, use a regular mmap instead.
      struct rlimit rlimit_fsize;
      CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
      use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
        (page_aligned_byte_count < rlimit_fsize.rlim_cur);
    }
  }

  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
    flags &= ~MAP_ANONYMOUS;
  }

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

  void* actual = MapInternal(expected_ptr,
                             page_aligned_byte_count,
                             prot,
                             flags,
                             fd.get(),
                             0,
                             low_4gb);
  saved_errno = errno;

  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. See process "
                              "maps in the log.", expected_ptr, page_aligned_byte_count, prot,
                              flags, fd.get(), strerror(saved_errno));
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}

MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}
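
// Usage sketch for MapFileAtAddress (illustrative; fd and file_size are hypothetical):
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(/* expected_ptr */ nullptr,
//                                                        file_size,
//                                                        PROT_READ,
//                                                        MAP_PRIVATE,
//                                                        fd,
//                                                        /* start */ 0,
//                                                        /* low_4gb */ false,
//                                                        /* reuse */ false,
//                                                        "/path/to/file",
//                                                        &error_msg));
//   CHECK(map != nullptr) << error_msg;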

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 bool reuse,
                                 const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the file
  // but not necessarily to virtual memory. mmap will page-align 'expected_ptr' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
                                                           page_aligned_byte_count,
                                                           prot,
                                                           flags,
                                                           fd,
                                                           page_aligned_offset,
                                                           low_4gb));
  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      auto saved_errno = errno;

      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

      *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                                ") of file '%s' failed: %s. See process maps in the log.",
                                page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                                static_cast<int64_t>(page_aligned_offset), filename,
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t* real_start = actual + page_offset;
    const uint8_t* real_end = actual + page_offset + byte_count;
    const uint8_t* mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    DCHECK(maps_ != nullptr);
    maps_->insert(std::make_pair(base_begin_, this));
  }
}
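
// Usage sketch for RemapAtEnd (illustrative; new_end must be page-aligned and inside the map):
//   std::string error_msg;
//   std::unique_ptr<MemMap> tail(map->RemapAtEnd(map->Begin() + split_offset,
//                                                "tail", PROT_READ, &error_msg));
//   CHECK(tail != nullptr) << error_msg;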

MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg, bool use_ashmem) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

  int int_fd = -1;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += tail_name;
    int_fd = ashmem_create_region(debug_friendly_name.c_str(), tail_base_size);
    flags = MAP_PRIVATE | MAP_FIXED;
    if (int_fd == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                tail_name, strerror(errno));
      return nullptr;
    }
  }
  ScopedFd fd(int_fd);

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads from trying to take this memory region here.
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                                    flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Sync() {
  return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *maps_;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
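  // Reading the first example line: starting at 0x409be000 there are three separate 0x20-page
  // mappings named LinearAlloc, separated by gaps of 0x11d and 0x61c pages.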
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
           it->second->GetProtect() == map->GetProtect() &&
           it->second->GetName() == map->GetName() &&
           (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(address), end = maps_->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  if (maps_ == nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    maps_ = new Maps;
  }
}

void MemMap::Shutdown() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  delete maps_;
  maps_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                              new_size),
      base_size_ - new_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}
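
// MapInternal is the common mmap wrapper. For low_4gb requests without an explicit address on
// USE_ART_LOW_4G_ALLOCATOR platforms, it linearly scans the low 4GB starting at next_mem_pos_,
// using maps_ to skip over regions ART already owns and an msync probe to detect foreign
// mappings, and retries once from LOW_MEM_START before giving up with ENOMEM.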
void* MemMap::MapInternal(void* addr,
                          size_t length,
                          int prot,
                          int flags,
                          int fd,
                          off_t offset,
                          bool low_4gb) {
#ifdef __LP64__
  // When requesting low-4GB memory with an expected address, the requested range should fit
  // into 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
    LOG(ERROR) << "The requested address space (" << addr << ", "
               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
               << ") cannot fit in low_4gb";
    return MAP_FAILED;
  }
#else
  UNUSED(low_4gb);
#endif
  DCHECK_ALIGNED(length, kPageSize);
  if (low_4gb) {
    DCHECK_EQ(flags & MAP_FIXED, 0);
  }
  // TODO: A page allocator would be a useful abstraction here, as
  // 1) it is doubtful that MAP_32BIT on x86_64 is doing the right job for us.
  void* actual = MAP_FAILED;
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT is only available on x86_64.
  if (low_4gb && addr == nullptr) {
    bool first_run = true;

    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      // Use maps_ as an optimization to skip over large maps.
      // Find the first map whose address is > ptr.
      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
      if (it != maps_->begin()) {
        auto before_it = it;
        --before_it;
        // Start at the end of the map before the upper bound.
        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
        CHECK_ALIGNED(ptr, kPageSize);
      }
      while (it != maps_->end()) {
        // How much space do we have until the next map?
        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
        // If the space may be sufficient, break out of the loop.
        if (delta >= length) {
          break;
        }
        // Otherwise, skip to the end of the map.
        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
        CHECK_ALIGNED(ptr, kPageSize);
        ++it;
      }

      // Try to see if we get lucky with this address since none of the ART maps overlap.
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
      if (actual != MAP_FAILED) {
        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
        return actual;
      }

      if (4U * GB - ptr < length) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try again from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

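      // The probe below relies on msync semantics: msync fails with ENOMEM when the range is
      // not mapped, so a successful call means some other mapping already occupies that page.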
      // Check that the pages are free.
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // Update early, as we break out once we have found and mapped a region.

      if (safe) {
        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
        if (actual != MAP_FAILED) {
          return actual;
        }
      } else {
        // Skip over the last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      errno = ENOMEM;
    }
  } else {
    actual = mmap(addr, length, prot, flags, fd, offset);
  }

#else
#if defined(__LP64__)
  if (low_4gb && addr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif
  actual = mmap(addr, length, prot, flags, fd, offset);
#endif
  return actual;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

}  // namespace art