mem_map.cc revision ac304133ab4b988777bcc5ad12257cbb99c3871e
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"
#include "thread-inl.h"

#include <inttypes.h>
#include <backtrace/BacktraceMap.h>
#include <memory>

// See CreateStartPos below.
#ifdef __BIONIC__
#include <sys/auxv.h>
#endif

#include "base/stringprintf.h"
#include "ScopedFd.h"
#include "utils.h"

#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

std::multimap<void*, MemMap*> MemMap::maps_;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in the 32-bit address range for 64-bit architectures that do not support
// MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// getauxval as an entropy source is exposed in Bionic, but not in glibc before 2.16. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below the highest bit in ART_BASE_ADDRESS.
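  // Worked example (illustrative only; assumes ART_BASE_ADDRESS == 0x70000000 and 4KB pages):
  // CLZ(0x70000000) == 1, so mask_ones == (1 << 30) - 1 == 0x3fffffff and
  // mask == 0x3fffffff & ~0xfff == 0x3ffff000. The random input is thus reduced to a
  // page-aligned offset below the image address, then shifted up by LOW_MEM_START.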
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint8_t* random_data = reinterpret_cast<uint8_t*>(getauxval(AT_RANDOM));
  // The lower 8B are taken for the stack guard. Use the upper 8B (with the mask).
  return CreateStartPos(*reinterpret_cast<uintptr_t*>(random_data + 8));
#else
  // No auxv on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single /proc/self/maps entry.
static bool CheckOverlapping(uintptr_t begin,
                             uintptr_t end,
                             std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (!map->Build()) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)    // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  std::string maps;
  ReadFileToString("/proc/self/maps", &maps);
  *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                            "any existing map:\n%s\n",
                            begin, end, maps.c_str());
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (!map->Build()) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end < it->end)        // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest validates a non-MAP_FAILED mmap result against the expected address, calling
// munmap and reporting the reason in error_msg if validation fails.
//
// If expected_ptr is nullptr, nothing is checked beyond the fact that actual_ptr is not
// MAP_FAILED. However, if expected_ptr is non-null, we check that actual_ptr == expected_ptr
// and, if not, report in error_msg the conflicting mapping if one was found, or a generic
// error otherwise.
static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by the caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted; all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  if (!CheckNonOverlapping(expected, limit, error_msg)) {
    return false;
  }

  *error_msg = StringPrintf("Failed to mmap at expected address, mapped at "
                            "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
  return false;
}

MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_count, int prot,
                             bool low_4gb, std::string* error_msg) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  ScopedFd fd(-1);

#ifdef USE_ASHMEM
#ifdef HAVE_ANDROID_OS
  const bool use_ashmem = true;
#else
  // When not on Android, ashmem is faked using files in /tmp. Ensure that such files won't
  // fail due to ulimit restrictions. If they would, use a regular mmap instead.
  struct rlimit rlimit_fsize;
  CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
  const bool use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
      (page_aligned_byte_count < rlimit_fsize.rlim_cur);
#endif
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
    flags = MAP_PRIVATE;
  }
#endif

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

#ifdef __LP64__
  // When requesting low_4g memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
                              expected_ptr, expected_ptr + page_aligned_byte_count);
    return nullptr;
  }
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
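  // The scan below probes candidate pages with msync(): msync() on an unmapped page fails with
  // ENOMEM, so a failing probe means the page is available. The probe is inherently racy, as
  // another thread may map a probed page before our mmap call, which is why the mmap result is
  // re-checked and the scan resumes on failure.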
  void* actual = MAP_FAILED;
  if (low_4gb && expected_ptr == nullptr) {
    bool first_run = true;

    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try again from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check that the pages are free.
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // Update early, as we break out once we have found and mapped a region.

      if (safe) {
        actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
                      0);
        if (actual != MAP_FAILED) {
          // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
          // 4GB. If this is the case, unmap and retry.
          if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count < 4 * GB) {
            break;
          } else {
            munmap(actual, page_aligned_byte_count);
            actual = MAP_FAILED;
          }
        }
      } else {
        // Skip over the page that was found to be in use.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      saved_errno = ENOMEM;
    }
  } else {
    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
    saved_errno = errno;
  }

#else
#if defined(__LP64__)
  if (low_4gb && expected_ptr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
  saved_errno = errno;
#endif

  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
                              expected_ptr, page_aligned_byte_count, prot, flags, fd.get(),
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, false);
}

MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot, int flags, int fd,
                                 off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
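    // For example (an illustrative pattern, not a requirement enforced here): reserve a large
    // region with MapAnonymous, then map file contents over part of that reservation with
    // reuse == true.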
    CHECK(expected_ptr != nullptr);
    if (!CheckOverlapping(expected, limit, error_msg)) {
      return nullptr;
    }
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    if (expected_ptr != nullptr && !CheckNonOverlapping(expected, limit, error_msg)) {
      return nullptr;
    }
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'start' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the file
  // but not necessarily to virtual memory. mmap will page-align 'expected' for us.
  byte* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);

  byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
                                              page_aligned_byte_count,
                                              prot,
                                              flags,
                                              fd,
                                              page_aligned_offset));
  if (actual == MAP_FAILED) {
    auto saved_errno = errno;

    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s\n%s",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename,
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  if (!reuse_) {
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from maps_.
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  bool found = false;
  for (auto it = maps_.lower_bound(base_begin_), end = maps_.end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      maps_.erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to maps_.
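    // (Note: maps_ is a multimap because base addresses need not be unique; e.g. a map created
    // over a reused reservation can share its base address with the reservation itself.)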
    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
    maps_.insert(std::pair<void*, MemMap*>(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(base_begin_));
  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
  DCHECK(IsAligned<kPageSize>(new_end));
  byte* old_end = begin_ + size_;
  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
  byte* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<byte*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  byte* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK(IsAligned<kPageSize>(tail_base_size));

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  // MAP_FIXED is needed here as well: without it, the kernel is free to place the anonymous
  // mapping somewhere other than the tail region we are about to unmap.
  int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
#endif

  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
                              tail_base_begin, tail_base_size, name_.c_str(),
                              maps.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads from trying to take this memory region here.
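  // Note that MAP_FIXED pins the mapping to tail_base_begin, but it does not fail if another
  // thread grabbed the range in the window above; it silently replaces any such mapping, which
  // is the residual risk described in the comment above.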
  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed\n%s",
                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
                              maps.c_str());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os) {
  DumpMaps(os, maps_);
}

void MemMap::DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, mem_maps);
}

void MemMap::DumpMapsLocked(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
  os << mem_maps;
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = maps_.lower_bound(base_begin), end = maps_.end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  for (auto it = maps_.lower_bound(address), end = maps_.end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

}  // namespace art
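// A minimal usage sketch (illustrative only; names and sizes are hypothetical):
//
//   std::string error_msg;
//   std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
//       "sketch", nullptr, 16 * art::KB, PROT_READ | PROT_WRITE,
//       /* low_4gb */ false, &error_msg));
//   CHECK(map.get() != nullptr) << error_msg;
//   // Use map->Begin()/map->Size(); the region is unmapped when the MemMap is destroyed.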