mem_map.cc revision 1e13374baf7dfaf442ffbf9809c37c131d681eaf
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include "base/memory_tool.h"
#include <backtrace/BacktraceMap.h>
#include <inttypes.h>

#include <memory>
#include <sstream>

// See CreateStartPos below.
#ifdef __BIONIC__
#include <sys/auxv.h>
#endif

#include "base/stringprintf.h"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
#pragma GCC diagnostic pop

#include "thread-inl.h"
#include "utils.h"

#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#ifndef HAVE_ANDROID_OS
#include <sys/resource.h>
#endif
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

MemMap::Maps* MemMap::maps_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in the 32-bit address range for 64-bit architectures that do not support
// MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate a random starting position.
// To not interfere with the image position, take the image's address and only place mappings
// below it. Current formula (sketch):
//
// ART_BASE_ADDR       = 0001XXXXXXXXXXXXXXX
// -----------------------------------------
//                     = 0000111111111111111
// & ~(kPageSize - 1)  =~0000000000000001111
// -----------------------------------------
// mask                = 0000111111111110000
// & random data       = YYYYYYYYYYYYYYYYYYY
// -----------------------------------------
// tmp                 = 0000YYYYYYYYYYY0000
// + LOW_MEM_START     = 0000000000001000000
// -----------------------------------------
// start
//
// getauxval as an entropy source is exposed in Bionic, but not in glibc before 2.16. When we
// do not have Bionic, simply start at LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below the highest bit in ART_BASE_ADDRESS.
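  // Illustration (assumed values, not a build guarantee): with ART_BASE_ADDRESS == 0x70000000
  // and kPageSize == 4096, CLZ gives leading_zeros == 1, mask_ones == (1 << 30) - 1
  // == 0x3fffffff, and mask == 0x3ffff000, so bits 12..29 of the random input survive.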
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // The lowest (usually 12) bits are not used, as the result is aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask the input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint8_t* random_data = reinterpret_cast<uint8_t*>(getauxval(AT_RANDOM));
  // The lower 8B are taken for the stack guard. Use the upper 8B (with the mask).
  return CreateStartPos(*reinterpret_cast<uintptr_t*>(random_data + 8));
#else
  // No auxv on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to a random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single /proc/self/maps entry.
static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size,
                                       std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)   // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
  *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                            "any existing map. See process maps in the log.", begin, end);
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)     // start of new within old
        || (end > it->start && end <= it->end)      // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest validates a non-MAP_FAILED mmap result against the expected address, calling
// munmap and reporting the reason in error_msg if validation fails.
//
// If expected_ptr is null, nothing is checked beyond the fact that actual_ptr is not
// MAP_FAILED. However, if expected_ptr is non-null, we check that actual_ptr == expected_ptr,
// and if not, report in error_msg the conflicting mapping if one was found, or a generic
// error otherwise.
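//
// Illustrative failure (format per the code below; addresses are made up): a request for
// 0x71000000 that lands at 0x72000000 yields "Failed to mmap at expected address, mapped at
// 0x72000000 instead of 0x71000000", followed by the conflicting map if CheckNonOverlapping
// finds one.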
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  // We call this here so that we can try to generate a full error message with the
  // overlapping mapping. There's no guarantee that there will be an overlap though, since
  // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is used, even if
  //   there is no overlap, and
  // - There might have been an overlap at the point of mmap, but the overlapping region has
  //   since been unmapped.
  std::string error_detail;
  CheckNonOverlapping(expected, limit, &error_detail);

  std::ostringstream os;
  os << StringPrintf("Failed to mmap at expected address, mapped at "
                     "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                     actual, expected);
  if (!error_detail.empty()) {
    os << " : " << error_detail;
  }

  *error_msg = os.str();
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr, size_t page_aligned_byte_count, int prot, int flags,
                                    int fd) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, 0);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED, the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
                             bool low_4gb, bool reuse, std::string* error_msg) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  }

  ScopedFd fd(-1);

#ifdef USE_ASHMEM
#ifdef HAVE_ANDROID_OS
  const bool use_ashmem = true;
#else
  // When not on Android, ashmem is faked using files in /tmp. Ensure that such files won't
  // fail due to ulimit restrictions. If they would, fall back to a regular mmap.
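  // RLIMIT_FSIZE is the largest file this process may create; growing the fake-ashmem backing
  // file beyond it would fail, so ashmem is only used when the region fits under the limit.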
  struct rlimit rlimit_fsize;
  CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
  const bool use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
      (page_aligned_byte_count < rlimit_fsize.rlim_cur);
#endif
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
    flags &= ~MAP_ANONYMOUS;
  }
#endif

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

#ifdef __LP64__
  // When requesting low_4gb memory with an expected address, the requested range must fit
  // into 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
                              expected_ptr, expected_ptr + page_aligned_byte_count);
    return nullptr;
  }
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT is only available on x86_64, so scan for a free region manually.
  void* actual = MAP_FAILED;
  if (low_4gb && expected_ptr == nullptr) {
    bool first_run = true;

    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      // Use maps_ as an optimization to skip over large maps.
      // Find the first map whose address is > ptr.
      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
      if (it != maps_->begin()) {
        auto before_it = it;
        --before_it;
        // Start at the end of the map before the upper bound.
        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
        CHECK_ALIGNED(ptr, kPageSize);
      }
      while (it != maps_->end()) {
        // How much space do we have until the next map?
        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
        // If the space may be sufficient, break out of the loop.
        if (delta >= page_aligned_byte_count) {
          break;
        }
        // Otherwise, skip to the end of the map.
        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
        CHECK_ALIGNED(ptr, kPageSize);
        ++it;
      }

      // Try to see if we get lucky with this address since none of the ART maps overlap.
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
                               fd.get());
      if (actual != MAP_FAILED) {
        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count;
        break;
      }

      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try again from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
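          // Give up; the MAP_FAILED check below will log and report ENOMEM.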
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check that the pages are free. msync returns 0 only for mapped addresses and fails
      // with ENOMEM for unmapped ones.
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // update early, as we break out when we find and map a region

      if (safe) {
        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags,
                                 fd.get());
        if (actual != MAP_FAILED) {
          break;
        }
      } else {
        // Skip over the last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      saved_errno = ENOMEM;
    }
  } else {
    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
    saved_errno = errno;
  }

#else
#if defined(__LP64__)
  if (low_4gb && expected_ptr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
  saved_errno = errno;
#endif

  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. See process "
                              "maps in the log.", expected_ptr, page_aligned_byte_count, prot,
                              flags, fd.get(), strerror(saved_errno));
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}

MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
                                 int fd, off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we expect this mapping to
  // be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
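  // Worked example (illustrative values): with 4 KiB pages and start == 0x1234, page_offset
  // == 0x234 and page_aligned_offset == 0x1000; byte_count is grown by page_offset below so
  // the caller's full [start, start + byte_count) range stays inside the mapping.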
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the
  // file but not necessarily to virtual memory. mmap will page-align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
                                                    page_aligned_byte_count,
                                                    prot,
                                                    flags,
                                                    fd,
                                                    page_aligned_offset));
  if (actual == MAP_FAILED) {
    auto saved_errno = errno;

    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);

    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s. See process maps in the log.",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename,
                              strerror(saved_errno));
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t* real_start = actual + page_offset;
    const uint8_t* real_end = actual + page_offset + byte_count;
    const uint8_t* mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
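    // maps_ permits duplicate base addresses (note the duplicate-key iteration in ~MemMap and
    // HasMemMap), so inserting another entry with the same key is fine.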
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    DCHECK(maps_ != nullptr);
    maps_->insert(std::make_pair(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(base_begin_));
  DCHECK(IsAligned<kPageSize>(reinterpret_cast<uint8_t*>(base_begin_) + base_size_));
  DCHECK(IsAligned<kPageSize>(new_end));
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK(IsAligned<kPageSize>(tail_base_size));

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap calls. Otherwise, libc (or
  // something else) might take this memory region. Note this isn't perfect, as there's no way
  // to prevent other threads from trying to take this memory region here.
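  // Note: the ashmem path above passes MAP_FIXED, so the kernel will place the tail exactly
  // at tail_base_begin; the anonymous fallback passes the address only as a hint.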
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                                    flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *maps_;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
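    // kMaxGaps bounds how many gaps one merged run may absorb, so a long alternation of
    // mappings and holes cannot grow a single output line without bound.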
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK(IsAligned<kPageSize>(size));
    void* end = map->BaseEnd();
    while (it != maps_end &&
        it->second->GetProtect() == map->GetProtect() &&
        it->second->GetName() == map->GetName() &&
        (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK(IsAligned<kPageSize>(gap));
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK(IsAligned<kPageSize>(it->second->BaseSize()));
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(maps_ != nullptr);
  for (auto it = maps_->lower_bound(address), end = maps_->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  if (maps_ == nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    maps_ = new Maps;
  }
}

void MemMap::Shutdown() {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  delete maps_;
  maps_ = nullptr;
}

void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                              new_size),
      base_size_ - new_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

}  // namespace art