mem_map.cc revision 0389cd57de8faedb85b749656b8e1735a7bce002
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include "base/memory_tool.h"
#include <backtrace/BacktraceMap.h>
#include <inttypes.h>
#include <stdlib.h>

#include <memory>
#include <sstream>

#include "base/stringprintf.h"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
#pragma GCC diagnostic pop

#include "thread-inl.h"
#include "utils.h"

#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

MemMap::Maps* MemMap::maps_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate a random starting position.
// To avoid interfering with the image position, take the image's address and only place
// mappings below it. Current formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) = ~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below the highest bit in ART_BASE_ADDRESS.
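  // Worked example (illustrative; assumes ART_BASE_ADDRESS == 0x70000000 and a 4KB page
  // size): CLZ(0x70000000) == 1, so mask_ones == (1 << 30) - 1 == 0x3fffffff and
  // mask == 0x3ffff000. The random input is thus reduced to a page-aligned offset below
  // the image base before LOW_MEM_START is added.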
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // The lowest bits (usually 12) are unused, as the result is page-aligned.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single /proc/self/maps entry.
static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size,
                                       std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)    // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
  *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " is not contained "
                            "within any existing map. See process maps in the log.", begin, end);
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end < it->end)        // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest validates a non-MAP_FAILED mmap result against the
// expected value, calling munmap if validation fails and giving the
// reason in error_msg.
//
// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr, and if not,
// report in error_msg what the conflicting mapping was, if one was
// found, or a generic error in other cases.
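//
// Illustrative example (values assumed): a request for [0x10000000, 0x10003000) that the
// kernel instead satisfies at 0x20000000 fails the expected_ptr check; CheckMapRequest
// then munmaps the 0x20000000 mapping and consults CheckNonOverlapping to report, where
// possible, which existing map made 0x10000000 unavailable.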
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  // We call this here so that we can try and generate a full error
  // message with the overlapping mapping. There's no guarantee that
  // there will be an overlap, though, since
  // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
  //   specified, even if there is no overlap
  // - There might have been an overlap at the point of mmap, but the
  //   overlapping region has since been unmapped.
  std::string error_detail;
  CheckNonOverlapping(expected, limit, &error_detail);

  std::ostringstream os;
  os << StringPrintf("Failed to mmap at expected address, mapped at "
                     "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                     actual, expected);
  if (!error_detail.empty()) {
    os << " : " << error_detail;
  }

  *error_msg = os.str();
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr, size_t page_aligned_byte_count, int prot, int flags,
                                    int fd) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, 0);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
                             bool low_4gb, bool reuse, std::string* error_msg) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  }

  ScopedFd fd(-1);

#ifdef USE_ASHMEM
#ifdef __ANDROID__
  const bool use_ashmem = true;
#else
  // When not on Android, ashmem is faked using files in /tmp. Ensure that such files won't
  // fail due to ulimit restrictions. If they would, fall back to a regular mmap.
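  // Illustrative example (values assumed): with a soft RLIMIT_FSIZE of 1 MiB, a 2 MiB
  // region cannot be backed by a fake ashmem file in /tmp, so use_ashmem below becomes
  // false and the allocation proceeds as a plain anonymous mmap.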
  struct rlimit rlimit_fsize;
  CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
  const bool use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
      (page_aligned_byte_count < rlimit_fsize.rlim_cur);
#endif
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
    flags &= ~MAP_ANONYMOUS;
  }
#endif

  // We need to store and potentially set an error number for pretty-printing errors.
  int saved_errno = 0;

#ifdef __LP64__
  // When requesting low_4g memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
                              expected_ptr, expected_ptr + page_aligned_byte_count);
    return nullptr;
  }
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
  void* actual = MAP_FAILED;
  if (low_4gb && expected_ptr == nullptr) {
    bool first_run = true;

    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      // Use maps_ as an optimization to skip over large maps.
      // Find the first map whose address is > ptr.
      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
      if (it != maps_->begin()) {
        auto before_it = it;
        --before_it;
        // Start at the end of the map before the upper bound.
        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
        CHECK_ALIGNED(ptr, kPageSize);
      }
      while (it != maps_->end()) {
        // How much space do we have until the next map?
        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
        // If the space may be sufficient, break out of the loop.
        if (delta >= page_aligned_byte_count) {
          break;
        }
        // Otherwise, skip to the end of the map.
        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
        CHECK_ALIGNED(ptr, kPageSize);
        ++it;
      }

      // Try to see if we get lucky with this address since none of the ART maps overlap.
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
                               fd.get());
      if (actual != MAP_FAILED) {
        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count;
        break;
      }

      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try again from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }
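      // The loop below uses msync() as a cheap page-presence probe: msync on an unmapped
      // page fails with ENOMEM, so a msync that *succeeds* means the candidate page is
      // already mapped and the range cannot be used.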

      uintptr_t tail_ptr;

      // Check pages are free.
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // update early, as we break out when we found and mapped a region

      if (safe) {
        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
                                 fd.get());
        if (actual != MAP_FAILED) {
          break;
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      saved_errno = ENOMEM;
    }
  } else {
    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
    saved_errno = errno;
  }

#else
#if defined(__LP64__)
  if (low_4gb && expected_ptr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
  saved_errno = errno;
#endif

  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. See process "
                              "maps in the log.", expected_ptr, page_aligned_byte_count, prot,
                              flags, fd.get(), strerror(saved_errno));
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}

MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
                                 int fd, off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
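  // Worked example (illustrative; assumes kPageSize == 4096): start == 0x12345 gives
  // page_offset == 0x345 and page_aligned_offset == 0x12000, so a byte_count of 0x1000
  // actually maps RoundUp(0x1000 + 0x345, 0x1000) == 0x2000 bytes.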
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file
  // but not necessarily to virtual memory. mmap will page align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
                                                    page_aligned_byte_count,
                                                    prot,
                                                    flags,
                                                    fd,
                                                    page_aligned_offset));
  if (actual == MAP_FAILED) {
    auto saved_errno = errno;

    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s. See process maps in the log.",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename,
                              strerror(saved_errno));
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t *real_start = actual + page_offset;
    const uint8_t *real_end = actual + page_offset + byte_count;
    const uint8_t *mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
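    // Several MemMaps may share a base address, so maps_ keeps one entry per MemMap and
    // lookups (see ~MemMap and HasMemMap) iterate over all entries with an equal key.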
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    DCHECK(maps_ != nullptr);
    maps_->insert(std::make_pair(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect, as there's no way to prevent
  // other threads from trying to take this memory region here.
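  // The ashmem path above set MAP_FIXED, so the kernel must reuse tail_base_begin; the
  // anonymous path passes tail_base_begin only as a hint, which is why the tail MemMap
  // below is built from 'actual' rather than from tail_base_begin.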
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                                    flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *maps_;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
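    // Fold at most kMaxGaps gaps into one line; beyond that, start a new [MemMap: ...]
    // entry so that a single line cannot grow without bound.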
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
           it->second->GetProtect() == map->GetProtect() &&
           it->second->GetName() == map->GetName() &&
           (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(address), end = maps_->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  if (maps_ == nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    maps_ = new Maps;
  }
}

void MemMap::Shutdown() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  delete maps_;
  maps_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                              new_size),
      base_size_ - new_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

}  // namespace art
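// Usage sketch (illustrative only): allocating and then trimming an anonymous region,
// assuming MemMap::Init() has already been called by the runtime.
//
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapAnonymous(
//       "example-region", /* expected_ptr */ nullptr, 16 * kPageSize,
//       PROT_READ | PROT_WRITE, /* low_4gb */ false, /* reuse */ false, &error_msg));
//   CHECK(map.get() != nullptr) << error_msg;
//   map->SetSize(8 * kPageSize);  // Unmap and release the tail pages.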