/*
 * Copyright (C) 2015 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_allocator.h"
#include "linker_debug.h"
#include "linker.h"

#include <algorithm>
#include <vector>

#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#include <async_safe/log.h>

#include "private/bionic_prctl.h"

//
// LinkerMemoryAllocator is a general-purpose allocator
// designed to provide the same functionality as the malloc/free/realloc
// libc functions.
48// 49// On alloc: 50// If size is >= 1k allocator proxies malloc call directly to mmap 51// If size < 1k allocator uses SmallObjectAllocator for the size 52// rounded up to the nearest power of two. 53// 54// On free: 55// 56// For a pointer allocated using proxy-to-mmap allocator unmaps 57// the memory. 58// 59// For a pointer allocated using SmallObjectAllocator it adds 60// the block to free_blocks_list_. If the number of free pages reaches 2, 61// SmallObjectAllocator munmaps one of the pages keeping the other one 62// in reserve. 63 64static const char kSignature[4] = {'L', 'M', 'A', 1}; 65 66static const size_t kSmallObjectMaxSize = 1 << kSmallObjectMaxSizeLog2; 67 68// This type is used for large allocations (with size >1k) 69static const uint32_t kLargeObject = 111; 70 71bool operator<(const small_object_page_record& one, const small_object_page_record& two) { 72 return one.page_addr < two.page_addr; 73} 74 75static inline uint16_t log2(size_t number) { 76 uint16_t result = 0; 77 number--; 78 79 while (number != 0) { 80 result++; 81 number >>= 1; 82 } 83 84 return result; 85} 86 87LinkerSmallObjectAllocator::LinkerSmallObjectAllocator(uint32_t type, size_t block_size) 88 : type_(type), block_size_(block_size), free_pages_cnt_(0), free_blocks_list_(nullptr) {} 89 90void* LinkerSmallObjectAllocator::alloc() { 91 CHECK(block_size_ != 0); 92 93 if (free_blocks_list_ == nullptr) { 94 alloc_page(); 95 } 96 97 small_object_block_record* block_record = free_blocks_list_; 98 if (block_record->free_blocks_cnt > 1) { 99 small_object_block_record* next_free = reinterpret_cast<small_object_block_record*>( 100 reinterpret_cast<uint8_t*>(block_record) + block_size_); 101 next_free->next = block_record->next; 102 next_free->free_blocks_cnt = block_record->free_blocks_cnt - 1; 103 free_blocks_list_ = next_free; 104 } else { 105 free_blocks_list_ = block_record->next; 106 } 107 108 // bookkeeping... 
109 auto page_record = find_page_record(block_record); 110 111 if (page_record->allocated_blocks_cnt == 0) { 112 free_pages_cnt_--; 113 } 114 115 page_record->free_blocks_cnt--; 116 page_record->allocated_blocks_cnt++; 117 118 memset(block_record, 0, block_size_); 119 120 return block_record; 121} 122 123void LinkerSmallObjectAllocator::free_page(linker_vector_t::iterator page_record) { 124 void* page_start = reinterpret_cast<void*>(page_record->page_addr); 125 void* page_end = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(page_start) + PAGE_SIZE); 126 127 while (free_blocks_list_ != nullptr && 128 free_blocks_list_ > page_start && 129 free_blocks_list_ < page_end) { 130 free_blocks_list_ = free_blocks_list_->next; 131 } 132 133 small_object_block_record* current = free_blocks_list_; 134 135 while (current != nullptr) { 136 while (current->next > page_start && current->next < page_end) { 137 current->next = current->next->next; 138 } 139 140 current = current->next; 141 } 142 143 munmap(page_start, PAGE_SIZE); 144 page_records_.erase(page_record); 145 free_pages_cnt_--; 146} 147 148void LinkerSmallObjectAllocator::free(void* ptr) { 149 auto page_record = find_page_record(ptr); 150 151 ssize_t offset = reinterpret_cast<uintptr_t>(ptr) - sizeof(page_info); 152 153 if (offset % block_size_ != 0) { 154 async_safe_fatal("invalid pointer: %p (block_size=%zd)", ptr, block_size_); 155 } 156 157 memset(ptr, 0, block_size_); 158 small_object_block_record* block_record = reinterpret_cast<small_object_block_record*>(ptr); 159 160 block_record->next = free_blocks_list_; 161 block_record->free_blocks_cnt = 1; 162 163 free_blocks_list_ = block_record; 164 165 page_record->free_blocks_cnt++; 166 page_record->allocated_blocks_cnt--; 167 168 if (page_record->allocated_blocks_cnt == 0) { 169 if (free_pages_cnt_++ > 1) { 170 // if we already have a free page - unmap this one. 
171 free_page(page_record); 172 } 173 } 174} 175 176linker_vector_t::iterator LinkerSmallObjectAllocator::find_page_record(void* ptr) { 177 void* addr = reinterpret_cast<void*>(PAGE_START(reinterpret_cast<uintptr_t>(ptr))); 178 small_object_page_record boundary; 179 boundary.page_addr = addr; 180 linker_vector_t::iterator it = std::lower_bound( 181 page_records_.begin(), page_records_.end(), boundary); 182 183 if (it == page_records_.end() || it->page_addr != addr) { 184 // not found... 185 async_safe_fatal("page record for %p was not found (block_size=%zd)", ptr, block_size_); 186 } 187 188 return it; 189} 190 191void LinkerSmallObjectAllocator::create_page_record(void* page_addr, size_t free_blocks_cnt) { 192 small_object_page_record record; 193 record.page_addr = page_addr; 194 record.free_blocks_cnt = free_blocks_cnt; 195 record.allocated_blocks_cnt = 0; 196 197 linker_vector_t::iterator it = std::lower_bound( 198 page_records_.begin(), page_records_.end(), record); 199 page_records_.insert(it, record); 200} 201 202void LinkerSmallObjectAllocator::alloc_page() { 203 static_assert(sizeof(page_info) % 16 == 0, 204 "sizeof(page_info) is not multiple of 16"); 205 void* map_ptr = mmap(nullptr, PAGE_SIZE, 206 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); 207 if (map_ptr == MAP_FAILED) { 208 async_safe_fatal("mmap failed"); 209 } 210 211 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, PAGE_SIZE, "linker_alloc_small_objects"); 212 213 page_info* info = reinterpret_cast<page_info*>(map_ptr); 214 memcpy(info->signature, kSignature, sizeof(kSignature)); 215 info->type = type_; 216 info->allocator_addr = this; 217 218 size_t free_blocks_cnt = (PAGE_SIZE - sizeof(page_info))/block_size_; 219 220 create_page_record(map_ptr, free_blocks_cnt); 221 222 small_object_block_record* first_block = reinterpret_cast<small_object_block_record*>(info + 1); 223 224 first_block->next = free_blocks_list_; 225 first_block->free_blocks_cnt = free_blocks_cnt; 226 227 
free_blocks_list_ = first_block; 228} 229 230 231void LinkerMemoryAllocator::initialize_allocators() { 232 if (allocators_ != nullptr) { 233 return; 234 } 235 236 LinkerSmallObjectAllocator* allocators = 237 reinterpret_cast<LinkerSmallObjectAllocator*>(allocators_buf_); 238 239 for (size_t i = 0; i < kSmallObjectAllocatorsCount; ++i) { 240 uint32_t type = i + kSmallObjectMinSizeLog2; 241 new (allocators + i) LinkerSmallObjectAllocator(type, 1 << type); 242 } 243 244 allocators_ = allocators; 245} 246 247void* LinkerMemoryAllocator::alloc_mmap(size_t size) { 248 size_t allocated_size = PAGE_END(size + sizeof(page_info)); 249 void* map_ptr = mmap(nullptr, allocated_size, 250 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); 251 252 if (map_ptr == MAP_FAILED) { 253 async_safe_fatal("mmap failed"); 254 } 255 256 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, map_ptr, allocated_size, "linker_alloc_lob"); 257 258 page_info* info = reinterpret_cast<page_info*>(map_ptr); 259 memcpy(info->signature, kSignature, sizeof(kSignature)); 260 info->type = kLargeObject; 261 info->allocated_size = allocated_size; 262 263 return info + 1; 264} 265 266void* LinkerMemoryAllocator::alloc(size_t size) { 267 // treat alloc(0) as alloc(1) 268 if (size == 0) { 269 size = 1; 270 } 271 272 if (size > kSmallObjectMaxSize) { 273 return alloc_mmap(size); 274 } 275 276 uint16_t log2_size = log2(size); 277 278 if (log2_size < kSmallObjectMinSizeLog2) { 279 log2_size = kSmallObjectMinSizeLog2; 280 } 281 282 return get_small_object_allocator(log2_size)->alloc(); 283} 284 285page_info* LinkerMemoryAllocator::get_page_info(void* ptr) { 286 page_info* info = reinterpret_cast<page_info*>(PAGE_START(reinterpret_cast<size_t>(ptr))); 287 if (memcmp(info->signature, kSignature, sizeof(kSignature)) != 0) { 288 async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr); 289 } 290 291 return info; 292} 293 294void* LinkerMemoryAllocator::realloc(void* ptr, size_t size) { 295 if (ptr == nullptr) { 
296 return alloc(size); 297 } 298 299 if (size == 0) { 300 free(ptr); 301 return nullptr; 302 } 303 304 page_info* info = get_page_info(ptr); 305 306 size_t old_size = 0; 307 308 if (info->type == kLargeObject) { 309 old_size = info->allocated_size - sizeof(page_info); 310 } else { 311 LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type); 312 if (allocator != info->allocator_addr) { 313 async_safe_fatal("invalid pointer %p (page signature mismatch)", ptr); 314 } 315 316 old_size = allocator->get_block_size(); 317 } 318 319 if (old_size < size) { 320 void *result = alloc(size); 321 memcpy(result, ptr, old_size); 322 free(ptr); 323 return result; 324 } 325 326 return ptr; 327} 328 329void LinkerMemoryAllocator::free(void* ptr) { 330 if (ptr == nullptr) { 331 return; 332 } 333 334 page_info* info = get_page_info(ptr); 335 336 if (info->type == kLargeObject) { 337 munmap(info, info->allocated_size); 338 } else { 339 LinkerSmallObjectAllocator* allocator = get_small_object_allocator(info->type); 340 if (allocator != info->allocator_addr) { 341 async_safe_fatal("invalid pointer %p (invalid allocator address for the page)", ptr); 342 } 343 344 allocator->free(ptr); 345 } 346} 347 348LinkerSmallObjectAllocator* LinkerMemoryAllocator::get_small_object_allocator(uint32_t type) { 349 if (type < kSmallObjectMinSizeLog2 || type > kSmallObjectMaxSizeLog2) { 350 async_safe_fatal("invalid type: %u", type); 351 } 352 353 initialize_allocators(); 354 return &allocators_[type - kSmallObjectMinSizeLog2]; 355} 356