mem_map.cc revision d8f26dbebe72c1cbdfa85bdeeb003283c7435db3
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <inttypes.h>
#include <backtrace/BacktraceMap.h>
#include <memory>

// See CreateStartPos below.
#ifdef __BIONIC__
#include <sys/auxv.h>
#endif

#include "base/stringprintf.h"
#include "ScopedFd.h"
#include "utils.h"

#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#endif

namespace art {

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

#if defined(__LP64__) && !defined(__x86_64__)
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// getauxval as an entropy source is exposed in Bionic, but not in glibc before 2.16. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif
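
// Worked example (illustrative values only, assuming ART_BASE_ADDRESS == 0x70000000 and
// kPageSize == 4096): CLZ(0x70000000) == 1, so mask_ones == (1 << 30) - 1 == 0x3fffffff and
// mask == 0x3ffff000. An input of 0x12345678 then yields
// (0x12345678 & 0x3ffff000) + LOW_MEM_START == 0x12345000 + 0x10000 == 0x12355000.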

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint8_t* random_data = reinterpret_cast<uint8_t*>(getauxval(AT_RANDOM));
  // The lower 8B are taken for the stack guard. Use the upper 8B (with mask).
  return CreateStartPos(*reinterpret_cast<uintptr_t*>(random_data + 8));
#else
  // No auxv on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::ostringstream* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted, all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
  if (!map->Build()) {
    *error_msg << StringPrintf("Failed to build process map to determine why mmap returned "
                               "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);

    return false;
  }
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((expected >= it->start && expected < it->end)      // start of new within old
        || (limit > it->start && limit < it->end)          // end of new within old
        || (expected <= it->start && limit > it->end)) {   // start/end of new includes all of old
      *error_msg
          << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                          "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
                          expected, limit,
                          static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                          it->name.c_str())
          << std::make_pair(it, map->end());
      return false;
    }
  }
  *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
                             "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
  return false;
}

MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
                             bool low_4gb, std::string* error_msg) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
    return nullptr;
  }
  int flags = MAP_PRIVATE;
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  // We need to store and potentially set an error number for pretty printing of errors.
  int saved_errno = 0;

#ifdef __LP64__
  // When requesting low_4g memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
                              expected, expected + page_aligned_byte_count);
    return nullptr;
  }
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if defined(__LP64__) && !defined(__x86_64__)
  // MAP_32BIT only available on x86_64.
  void* actual = MAP_FAILED;
  if (low_4gb && expected == nullptr) {
    flags |= MAP_FIXED;

    bool first_run = true;

    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try another time from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free.
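      // The probe below relies on msync() failing with ENOMEM when any page in the given range
      // is not mapped; a call that succeeds therefore means the candidate page is already in use
      // and the region cannot be placed here.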
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // update early, as we break out when we found and mapped a region

      if (safe == true) {
        actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
                      0);
        if (actual != MAP_FAILED) {
          break;
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      saved_errno = ENOMEM;
    }
  } else {
    actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
    saved_errno = errno;
  }

#else
#ifdef __x86_64__
  if (low_4gb && expected == nullptr) {
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
  saved_errno = errno;
#endif

  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
                              expected, page_aligned_byte_count, prot, flags, fd.get(),
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
    *error_msg = check_map_request_error_msg.str();
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot);
}

MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
                                 off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected != nullptr);
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected' is modified (if specified, i.e. non-null) to be page aligned to the file but
  // not necessarily to virtual memory. mmap will page align 'expected' for us.
  byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);
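  // Worked example with illustrative values (kPageSize == 0x1000): for start == 0x2234 and
  // byte_count == 0x800, page_offset == 0x234, page_aligned_offset == 0x2000 and
  // page_aligned_byte_count == RoundUp(0x800 + 0x234, 0x1000) == 0x1000; a non-null 'expected'
  // is shifted down by 0x234 so that the byte at file offset 'start' still lands at 'expected'.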

  byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
                                              page_aligned_byte_count,
                                              prot,
                                              flags,
                                              fd,
                                              page_aligned_offset));
  if (actual == MAP_FAILED) {
    auto saved_errno = errno;

    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s\n%s",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename,
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
    *error_msg = check_map_request_error_msg.str();
    return nullptr;
  }
  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  int result = munmap(base_begin_, base_size_);
  if (result == -1) {
    PLOG(FATAL) << "munmap failed";
  }
}

MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
               size_t base_size, int prot)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);
  }
}

MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(base_begin_));
  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
  DCHECK(IsAligned<kPageSize>(new_end));
  byte* old_end = begin_ + size_;
  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
  byte* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
  }
  size_ = new_end - reinterpret_cast<byte*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  byte* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK(IsAligned<kPageSize>(tail_base_size));

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
                              tail_base_begin, tail_base_size, name_.c_str(),
                              maps.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads from trying to take this memory region here.
  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed\n%s",
                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
                              maps.c_str());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %s prot=0x%x %p-%p]",
                     mem_map.GetName().c_str(), mem_map.GetProtect(),
                     mem_map.BaseBegin(), mem_map.BaseEnd());
  return os;
}

}  // namespace art
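
// Usage sketch (illustrative only; not part of this file): a caller reserves an anonymous
// region, then splits off a read-only tail with RemapAtEnd. Names and sizes below are arbitrary
// example values.
//
//   std::string error_msg;
//   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("example-space", nullptr,
//                                                    16 * kPageSize, PROT_READ | PROT_WRITE,
//                                                    /* low_4gb */ false, &error_msg));
//   CHECK(map.get() != nullptr) << error_msg;
//   std::unique_ptr<MemMap> tail(map->RemapAtEnd(map->Begin() + 8 * kPageSize,
//                                                "example-space-tail", PROT_READ, &error_msg));
//   CHECK(tail.get() != nullptr) << error_msg;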