linker.cpp revision 688157295f55edbfddb2277e7bdf5635f6be591a
1/* 2 * Copyright (C) 2008, 2009 The Android Open Source Project 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * * Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * * Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the 13 * distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29#include <dlfcn.h> 30#include <errno.h> 31#include <fcntl.h> 32#include <inttypes.h> 33#include <pthread.h> 34#include <stdio.h> 35#include <stdlib.h> 36#include <string.h> 37#include <sys/mman.h> 38#include <sys/param.h> 39#include <unistd.h> 40 41#include <new> 42 43// Private C library headers. 44#include "private/bionic_tls.h" 45#include "private/KernelArgumentBlock.h" 46#include "private/ScopedPthreadMutexLocker.h" 47#include "private/ScopedFd.h" 48#include "private/ScopeGuard.h" 49#include "private/UniquePtr.h" 50 51#include "linker.h" 52#include "linker_debug.h" 53#include "linker_environ.h" 54#include "linker_phdr.h" 55#include "linker_allocator.h" 56 57/* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<< 58 * 59 * Do NOT use malloc() and friends or pthread_*() code here. 60 * Don't use printf() either; it's caused mysterious memory 61 * corruption in the past. 62 * The linker runs before we bring up libc and it's easiest 63 * to make sure it does not depend on any complex libc features 64 * 65 * open issues / todo: 66 * 67 * - cleaner error reporting 68 * - after linking, set as much stuff as possible to READONLY 69 * and NOEXEC 70 */ 71 72#if defined(__LP64__) 73#define SEARCH_NAME(x) x 74#else 75// Nvidia drivers are relying on the bug: 76// http://code.google.com/p/android/issues/detail?id=6670 77// so we continue to use base-name lookup for lp32 78static const char* get_base_name(const char* name) { 79 const char* bname = strrchr(name, '/'); 80 return bname ? 
bname + 1 : name; 81} 82#define SEARCH_NAME(x) get_base_name(x) 83#endif 84 85static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf); 86 87static LinkerAllocator<soinfo> g_soinfo_allocator; 88static LinkerAllocator<LinkedListEntry<soinfo>> g_soinfo_links_allocator; 89 90static soinfo* solist; 91static soinfo* sonext; 92static soinfo* somain; // main process, always the one after libdl_info 93 94static const char* const kDefaultLdPaths[] = { 95#if defined(__LP64__) 96 "/vendor/lib64", 97 "/system/lib64", 98#else 99 "/vendor/lib", 100 "/system/lib", 101#endif 102 nullptr 103}; 104 105#define LDPATH_BUFSIZE (LDPATH_MAX*64) 106#define LDPATH_MAX 8 107 108#define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64) 109#define LDPRELOAD_MAX 8 110 111static char g_ld_library_paths_buffer[LDPATH_BUFSIZE]; 112static const char* g_ld_library_paths[LDPATH_MAX + 1]; 113 114static char g_ld_preloads_buffer[LDPRELOAD_BUFSIZE]; 115static const char* g_ld_preload_names[LDPRELOAD_MAX + 1]; 116 117static soinfo* g_ld_preloads[LDPRELOAD_MAX + 1]; 118 119__LIBC_HIDDEN__ int g_ld_debug_verbosity; 120 121__LIBC_HIDDEN__ abort_msg_t* g_abort_message = nullptr; // For debuggerd. 122 123enum RelocationKind { 124 kRelocAbsolute = 0, 125 kRelocRelative, 126 kRelocCopy, 127 kRelocSymbol, 128 kRelocMax 129}; 130 131#if STATS 132struct linker_stats_t { 133 int count[kRelocMax]; 134}; 135 136static linker_stats_t linker_stats; 137 138static void count_relocation(RelocationKind kind) { 139 ++linker_stats.count[kind]; 140} 141#else 142static void count_relocation(RelocationKind) { 143} 144#endif 145 146#if COUNT_PAGES 147static unsigned bitmask[4096]; 148#if defined(__LP64__) 149#define MARK(offset) \ 150 do { \ 151 if ((((offset) >> 12) >> 5) < 4096) \ 152 bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \ 153 } while (0) 154#else 155#define MARK(offset) \ 156 do { \ 157 bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \ 158 } while (0) 159#endif 160#else 161#define MARK(x) do {} while (0) 162#endif 163 164// You shouldn't try to call memory-allocating functions in the dynamic linker. 165// Guard against the most obvious ones. 166#define DISALLOW_ALLOCATION(return_type, name, ...) \ 167 return_type name __VA_ARGS__ \ 168 { \ 169 __libc_fatal("ERROR: " #name " called from the dynamic linker!\n"); \ 170 } 171DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused)); 172DISALLOW_ALLOCATION(void, free, (void* u __unused)); 173DISALLOW_ALLOCATION(void*, realloc, (void* u1 __unused, size_t u2 __unused)); 174DISALLOW_ALLOCATION(void*, calloc, (size_t u1 __unused, size_t u2 __unused)); 175 176static char __linker_dl_err_buf[768]; 177 178char* linker_get_error_buffer() { 179 return &__linker_dl_err_buf[0]; 180} 181 182size_t linker_get_error_buffer_size() { 183 return sizeof(__linker_dl_err_buf); 184} 185 186// This function is an empty stub where GDB locates a breakpoint to get notified 187// about linker activity. 188extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity(); 189 190static pthread_mutex_t g__r_debug_mutex = PTHREAD_MUTEX_INITIALIZER; 191static r_debug _r_debug = {1, nullptr, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0}; 192static link_map* r_debug_tail = 0; 193 194static void insert_soinfo_into_debug_map(soinfo* info) { 195 // Copy the necessary fields into the debug structure. 
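  // The three fields copied below follow the SVR4 r_debug/link_map protocol that
  // gdb expects:
  //   l_addr - the load bias (run-time address minus link-time p_vaddr),
  //   l_name - the library name stored in the soinfo,
  //   l_ld   - the run-time address of the PT_DYNAMIC segment.
  // gdb sets a breakpoint on rtld_db_dlactivity() and re-reads _r_debug.r_map when
  // it fires, so the list only needs to be consistent around the RT_ADD/RT_DELETE
  // and RT_CONSISTENT transitions made in notify_gdb_of_load()/notify_gdb_of_unload().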
196 link_map* map = &(info->link_map_head); 197 map->l_addr = info->load_bias; 198 map->l_name = info->name; 199 map->l_ld = info->dynamic; 200 201 // Stick the new library at the end of the list. 202 // gdb tends to care more about libc than it does 203 // about leaf libraries, and ordering it this way 204 // reduces the back-and-forth over the wire. 205 if (r_debug_tail) { 206 r_debug_tail->l_next = map; 207 map->l_prev = r_debug_tail; 208 map->l_next = 0; 209 } else { 210 _r_debug.r_map = map; 211 map->l_prev = 0; 212 map->l_next = 0; 213 } 214 r_debug_tail = map; 215} 216 217static void remove_soinfo_from_debug_map(soinfo* info) { 218 link_map* map = &(info->link_map_head); 219 220 if (r_debug_tail == map) { 221 r_debug_tail = map->l_prev; 222 } 223 224 if (map->l_prev) { 225 map->l_prev->l_next = map->l_next; 226 } 227 if (map->l_next) { 228 map->l_next->l_prev = map->l_prev; 229 } 230} 231 232static void notify_gdb_of_load(soinfo* info) { 233 if (info->is_main_executable()) { 234 // GDB already knows about the main executable 235 return; 236 } 237 238 ScopedPthreadMutexLocker locker(&g__r_debug_mutex); 239 240 _r_debug.r_state = r_debug::RT_ADD; 241 rtld_db_dlactivity(); 242 243 insert_soinfo_into_debug_map(info); 244 245 _r_debug.r_state = r_debug::RT_CONSISTENT; 246 rtld_db_dlactivity(); 247} 248 249static void notify_gdb_of_unload(soinfo* info) { 250 if (info->is_main_executable()) { 251 // GDB already knows about the main executable 252 return; 253 } 254 255 ScopedPthreadMutexLocker locker(&g__r_debug_mutex); 256 257 _r_debug.r_state = r_debug::RT_DELETE; 258 rtld_db_dlactivity(); 259 260 remove_soinfo_from_debug_map(info); 261 262 _r_debug.r_state = r_debug::RT_CONSISTENT; 263 rtld_db_dlactivity(); 264} 265 266void notify_gdb_of_libraries() { 267 _r_debug.r_state = r_debug::RT_ADD; 268 rtld_db_dlactivity(); 269 _r_debug.r_state = r_debug::RT_CONSISTENT; 270 rtld_db_dlactivity(); 271} 272 273LinkedListEntry<soinfo>* SoinfoListAllocator::alloc() { 274 return g_soinfo_links_allocator.alloc(); 275} 276 277void SoinfoListAllocator::free(LinkedListEntry<soinfo>* entry) { 278 g_soinfo_links_allocator.free(entry); 279} 280 281static void protect_data(int protection) { 282 g_soinfo_allocator.protect_all(protection); 283 g_soinfo_links_allocator.protect_all(protection); 284} 285 286static soinfo* soinfo_alloc(const char* name, struct stat* file_stat, off64_t file_offset, uint32_t rtld_flags) { 287 if (strlen(name) >= SOINFO_NAME_LEN) { 288 DL_ERR("library name \"%s\" too long", name); 289 return nullptr; 290 } 291 292 soinfo* si = new (g_soinfo_allocator.alloc()) soinfo(name, file_stat, file_offset, rtld_flags); 293 294 sonext->next = si; 295 sonext = si; 296 297 TRACE("name %s: allocated soinfo @ %p", name, si); 298 return si; 299} 300 301static void soinfo_free(soinfo* si) { 302 if (si == nullptr) { 303 return; 304 } 305 306 if (si->base != 0 && si->size != 0) { 307 munmap(reinterpret_cast<void*>(si->base), si->size); 308 } 309 310 soinfo *prev = nullptr, *trav; 311 312 TRACE("name %s: freeing soinfo @ %p", si->name, si); 313 314 for (trav = solist; trav != nullptr; trav = trav->next) { 315 if (trav == si) { 316 break; 317 } 318 prev = trav; 319 } 320 321 if (trav == nullptr) { 322 // si was not in solist 323 DL_ERR("name \"%s\"@%p is not in solist!", si->name, si); 324 return; 325 } 326 327 // clear links to/from si 328 si->remove_all_links(); 329 330 // prev will never be null, because the first entry in solist is 331 // always the static libdl_info. 
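  // Unlink 'si' from the singly-linked solist; if it was the tail, pull sonext
  // back to its predecessor so soinfo_alloc() keeps appending at the end.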
332 prev->next = si->next; 333 if (si == sonext) { 334 sonext = prev; 335 } 336 337 g_soinfo_allocator.free(si); 338} 339 340static void parse_path(const char* path, const char* delimiters, 341 const char** array, char* buf, size_t buf_size, size_t max_count) { 342 if (path == nullptr) { 343 return; 344 } 345 346 size_t len = strlcpy(buf, path, buf_size); 347 348 size_t i = 0; 349 char* buf_p = buf; 350 while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) { 351 if (*array[i] != '\0') { 352 ++i; 353 } 354 } 355 356 // Forget the last path if we had to truncate; this occurs if the 2nd to 357 // last char isn't '\0' (i.e. wasn't originally a delimiter). 358 if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') { 359 array[i - 1] = nullptr; 360 } else { 361 array[i] = nullptr; 362 } 363} 364 365static void parse_LD_LIBRARY_PATH(const char* path) { 366 parse_path(path, ":", g_ld_library_paths, 367 g_ld_library_paths_buffer, sizeof(g_ld_library_paths_buffer), LDPATH_MAX); 368} 369 370static void parse_LD_PRELOAD(const char* path) { 371 // We have historically supported ':' as well as ' ' in LD_PRELOAD. 372 parse_path(path, " :", g_ld_preload_names, 373 g_ld_preloads_buffer, sizeof(g_ld_preloads_buffer), LDPRELOAD_MAX); 374} 375 376#if defined(__arm__) 377 378// For a given PC, find the .so that it belongs to. 379// Returns the base address of the .ARM.exidx section 380// for that .so, and the number of 8-byte entries 381// in that section (via *pcount). 382// 383// Intended to be called by libc's __gnu_Unwind_Find_exidx(). 384// 385// This function is exposed via dlfcn.cpp and libdl.so. 386_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) { 387 unsigned addr = (unsigned)pc; 388 389 for (soinfo* si = solist; si != 0; si = si->next) { 390 if ((addr >= si->base) && (addr < (si->base + si->size))) { 391 *pcount = si->ARM_exidx_count; 392 return (_Unwind_Ptr)si->ARM_exidx; 393 } 394 } 395 *pcount = 0; 396 return nullptr; 397} 398 399#endif 400 401// Here, we only have to provide a callback to iterate across all the 402// loaded libraries. gcc_eh does the rest. 403int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) { 404 int rv = 0; 405 for (soinfo* si = solist; si != nullptr; si = si->next) { 406 dl_phdr_info dl_info; 407 dl_info.dlpi_addr = si->link_map_head.l_addr; 408 dl_info.dlpi_name = si->link_map_head.l_name; 409 dl_info.dlpi_phdr = si->phdr; 410 dl_info.dlpi_phnum = si->phnum; 411 rv = cb(&dl_info, sizeof(dl_phdr_info), data); 412 if (rv != 0) { 413 break; 414 } 415 } 416 return rv; 417} 418 419ElfW(Sym)* soinfo::find_symbol_by_name(SymbolName& symbol_name) { 420 return is_gnu_hash() ? 
gnu_lookup(symbol_name) : elf_lookup(symbol_name); 421} 422 423static bool is_symbol_global_and_defined(const soinfo* si, const ElfW(Sym)* s) { 424 if (ELF_ST_BIND(s->st_info) == STB_GLOBAL || 425 ELF_ST_BIND(s->st_info) == STB_WEAK) { 426 return s->st_shndx != SHN_UNDEF; 427 } else if (ELF_ST_BIND(s->st_info) != STB_LOCAL) { 428 DL_WARN("unexpected ST_BIND value: %d for '%s' in '%s'", 429 ELF_ST_BIND(s->st_info), si->get_string(s->st_name), si->name); 430 } 431 432 return false; 433} 434 435ElfW(Sym)* soinfo::gnu_lookup(SymbolName& symbol_name) { 436 uint32_t hash = symbol_name.gnu_hash(); 437 uint32_t h2 = hash >> gnu_shift2_; 438 439 uint32_t bloom_mask_bits = sizeof(ElfW(Addr))*8; 440 uint32_t word_num = (hash / bloom_mask_bits) & gnu_maskwords_; 441 ElfW(Addr) bloom_word = gnu_bloom_filter_[word_num]; 442 443 // test against bloom filter 444 if ((1 & (bloom_word >> (hash % bloom_mask_bits)) & (bloom_word >> (h2 % bloom_mask_bits))) == 0) { 445 return nullptr; 446 } 447 448 // bloom test says "probably yes"... 449 uint32_t n = bucket_[hash % nbucket_]; 450 451 if (n == 0) { 452 return nullptr; 453 } 454 455 do { 456 ElfW(Sym)* s = symtab_ + n; 457 if (((chain_[n] ^ hash) >> 1) == 0 && 458 strcmp(get_string(s->st_name), symbol_name.get_name()) == 0 && 459 is_symbol_global_and_defined(this, s)) { 460 return s; 461 } 462 } while ((chain_[n++] & 1) == 0); 463 464 return nullptr; 465} 466 467ElfW(Sym)* soinfo::elf_lookup(SymbolName& symbol_name) { 468 uint32_t hash = symbol_name.elf_hash(); 469 470 TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p h=%x(elf) %zd", 471 symbol_name.get_name(), name, reinterpret_cast<void*>(base), hash, hash % nbucket_); 472 473 for (uint32_t n = bucket_[hash % nbucket_]; n != 0; n = chain_[n]) { 474 ElfW(Sym)* s = symtab_ + n; 475 if (strcmp(get_string(s->st_name), symbol_name.get_name()) == 0 && is_symbol_global_and_defined(this, s)) { 476 TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd", 477 symbol_name.get_name(), name, reinterpret_cast<void*>(s->st_value), 478 static_cast<size_t>(s->st_size)); 479 return s; 480 } 481 } 482 483 TRACE_TYPE(LOOKUP, "NOT FOUND %s in %s@%p %x %zd", 484 symbol_name.get_name(), name, reinterpret_cast<void*>(base), hash, hash % nbucket_); 485 486 return nullptr; 487} 488 489soinfo::soinfo(const char* name, const struct stat* file_stat, off64_t file_offset, int rtld_flags) { 490 memset(this, 0, sizeof(*this)); 491 492 strlcpy(this->name, name, sizeof(this->name)); 493 flags_ = FLAG_NEW_SOINFO; 494 version_ = SOINFO_VERSION; 495 496 if (file_stat != nullptr) { 497 this->st_dev_ = file_stat->st_dev; 498 this->st_ino_ = file_stat->st_ino; 499 this->file_offset_ = file_offset; 500 } 501 502 this->rtld_flags_ = rtld_flags; 503} 504 505 506uint32_t SymbolName::elf_hash() { 507 if (!has_elf_hash_) { 508 const unsigned char* name = reinterpret_cast<const unsigned char*>(name_); 509 uint32_t h = 0, g; 510 511 while (*name) { 512 h = (h << 4) + *name++; 513 g = h & 0xf0000000; 514 h ^= g; 515 h ^= g >> 24; 516 } 517 518 elf_hash_ = h; 519 has_elf_hash_ = true; 520 } 521 522 return elf_hash_; 523} 524 525uint32_t SymbolName::gnu_hash() { 526 if (!has_gnu_hash_) { 527 uint32_t h = 5381; 528 const unsigned char* name = reinterpret_cast<const unsigned char*>(name_); 529 while (*name != 0) { 530 h += (h << 5) + *name++; // h*33 + c = h + h * 32 + c = h + h << 5 + c 531 } 532 533 gnu_hash_ = h; 534 has_gnu_hash_ = true; 535 } 536 537 return gnu_hash_; 538} 539 540static ElfW(Sym)* soinfo_do_lookup(soinfo* si_from, const char* name, soinfo** si_found_in, 541 
const soinfo::soinfo_list_t& global_group, const soinfo::soinfo_list_t& local_group) { 542 SymbolName symbol_name(name); 543 ElfW(Sym)* s = nullptr; 544 545 /* "This element's presence in a shared object library alters the dynamic linker's 546 * symbol resolution algorithm for references within the library. Instead of starting 547 * a symbol search with the executable file, the dynamic linker starts from the shared 548 * object itself. If the shared object fails to supply the referenced symbol, the 549 * dynamic linker then searches the executable file and other shared objects as usual." 550 * 551 * http://www.sco.com/developers/gabi/2012-12-31/ch5.dynamic.html 552 * 553 * Note that this is unlikely since static linker avoids generating 554 * relocations for -Bsymbolic linked dynamic executables. 555 */ 556 if (si_from->has_DT_SYMBOLIC) { 557 DEBUG("%s: looking up %s in local scope (DT_SYMBOLIC)", si_from->name, name); 558 s = si_from->find_symbol_by_name(symbol_name); 559 if (s != nullptr) { 560 *si_found_in = si_from; 561 } 562 } 563 564 // 1. Look for it in global_group 565 if (s == nullptr) { 566 global_group.visit([&](soinfo* global_si) { 567 DEBUG("%s: looking up %s in %s (from global group)", si_from->name, name, global_si->name); 568 s = global_si->find_symbol_by_name(symbol_name); 569 if (s != nullptr) { 570 *si_found_in = global_si; 571 return false; 572 } 573 574 return true; 575 }); 576 } 577 578 // 2. Look for it in the local group 579 if (s == nullptr) { 580 local_group.visit([&](soinfo* local_si) { 581 if (local_si == si_from && si_from->has_DT_SYMBOLIC) { 582 // we already did this - skip 583 return true; 584 } 585 586 DEBUG("%s: looking up %s in %s (from local group)", si_from->name, name, local_si->name); 587 s = local_si->find_symbol_by_name(symbol_name); 588 if (s != nullptr) { 589 *si_found_in = local_si; 590 return false; 591 } 592 593 return true; 594 }); 595 } 596 597 if (s != nullptr) { 598 TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, " 599 "found in %s, base = %p, load bias = %p", 600 si_from->name, name, reinterpret_cast<void*>(s->st_value), 601 (*si_found_in)->name, reinterpret_cast<void*>((*si_found_in)->base), 602 reinterpret_cast<void*>((*si_found_in)->load_bias)); 603 } 604 605 return s; 606} 607 608// Each size has it's own allocator. 
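// TypeBasedAllocator<T> (below) just forwards to SizeBasedAllocator<sizeof(T)>, so
// every type of a given size shares one static LinkerBlockAllocator pool. LoadTask,
// for instance, is carved out of the SizeBasedAllocator<sizeof(LoadTask)> pool and
// constructed with placement new:
//   LoadTask* ptr = TypeBasedAllocator<LoadTask>::alloc();
//   return new (ptr) LoadTask(name, needed_by);
// This keeps the linker off malloc()/free(), which are deliberately disallowed above.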
609template<size_t size> 610class SizeBasedAllocator { 611 public: 612 static void* alloc() { 613 return allocator_.alloc(); 614 } 615 616 static void free(void* ptr) { 617 allocator_.free(ptr); 618 } 619 620 private: 621 static LinkerBlockAllocator allocator_; 622}; 623 624template<size_t size> 625LinkerBlockAllocator SizeBasedAllocator<size>::allocator_(size); 626 627template<typename T> 628class TypeBasedAllocator { 629 public: 630 static T* alloc() { 631 return reinterpret_cast<T*>(SizeBasedAllocator<sizeof(T)>::alloc()); 632 } 633 634 static void free(T* ptr) { 635 SizeBasedAllocator<sizeof(T)>::free(ptr); 636 } 637}; 638 639class LoadTask { 640 public: 641 struct deleter_t { 642 void operator()(LoadTask* t) { 643 TypeBasedAllocator<LoadTask>::free(t); 644 } 645 }; 646 647 typedef UniquePtr<LoadTask, deleter_t> unique_ptr; 648 649 static deleter_t deleter; 650 651 static LoadTask* create(const char* name, soinfo* needed_by) { 652 LoadTask* ptr = TypeBasedAllocator<LoadTask>::alloc(); 653 return new (ptr) LoadTask(name, needed_by); 654 } 655 656 const char* get_name() const { 657 return name_; 658 } 659 660 soinfo* get_needed_by() const { 661 return needed_by_; 662 } 663 private: 664 LoadTask(const char* name, soinfo* needed_by) 665 : name_(name), needed_by_(needed_by) {} 666 667 const char* name_; 668 soinfo* needed_by_; 669 670 DISALLOW_IMPLICIT_CONSTRUCTORS(LoadTask); 671}; 672 673LoadTask::deleter_t LoadTask::deleter; 674 675template <typename T> 676using linked_list_t = LinkedList<T, TypeBasedAllocator<LinkedListEntry<T>>>; 677 678typedef linked_list_t<soinfo> SoinfoLinkedList; 679typedef linked_list_t<const char> StringLinkedList; 680typedef linked_list_t<LoadTask> LoadTaskList; 681 682 683// This function walks down the tree of soinfo dependencies 684// in breadth-first order and 685// * calls action(soinfo* si) for each node, and 686// * terminates walk if action returns false. 687// 688// walk_dependencies_tree returns false if walk was terminated 689// by the action and true otherwise. 690template<typename F> 691static bool walk_dependencies_tree(soinfo* root_soinfos[], size_t root_soinfos_size, F action) { 692 SoinfoLinkedList visit_list; 693 SoinfoLinkedList visited; 694 695 for (size_t i = 0; i < root_soinfos_size; ++i) { 696 visit_list.push_back(root_soinfos[i]); 697 } 698 699 soinfo* si; 700 while ((si = visit_list.pop_front()) != nullptr) { 701 if (visited.contains(si)) { 702 continue; 703 } 704 705 if (!action(si)) { 706 return false; 707 } 708 709 visited.push_back(si); 710 711 si->get_children().for_each([&](soinfo* child) { 712 visit_list.push_back(child); 713 }); 714 } 715 716 return true; 717} 718 719 720// This is used by dlsym(3). It performs symbol lookup only within the 721// specified soinfo object and its dependencies in breadth first order. 722ElfW(Sym)* dlsym_handle_lookup(soinfo* si, soinfo** found, const char* name) { 723 ElfW(Sym)* result = nullptr; 724 SymbolName symbol_name(name); 725 726 727 walk_dependencies_tree(&si, 1, [&](soinfo* current_soinfo) { 728 result = current_soinfo->find_symbol_by_name(symbol_name); 729 if (result != nullptr) { 730 *found = current_soinfo; 731 return false; 732 } 733 734 return true; 735 }); 736 737 return result; 738} 739 740/* This is used by dlsym(3) to performs a global symbol lookup. If the 741 start value is null (for RTLD_DEFAULT), the search starts at the 742 beginning of the global solist. Otherwise the search starts at the 743 specified soinfo (for RTLD_NEXT). 
744 */ 745ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) { 746 SymbolName symbol_name(name); 747 748 if (start == nullptr) { 749 start = solist; 750 } 751 752 ElfW(Sym)* s = nullptr; 753 for (soinfo* si = start; (s == nullptr) && (si != nullptr); si = si->next) { 754 if ((si->get_rtld_flags() & RTLD_GLOBAL) == 0) { 755 continue; 756 } 757 758 s = si->find_symbol_by_name(symbol_name); 759 if (s != nullptr) { 760 *found = si; 761 break; 762 } 763 } 764 765 if (s != nullptr) { 766 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p", 767 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base)); 768 } 769 770 return s; 771} 772 773soinfo* find_containing_library(const void* p) { 774 ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p); 775 for (soinfo* si = solist; si != nullptr; si = si->next) { 776 if (address >= si->base && address - si->base < si->size) { 777 return si; 778 } 779 } 780 return nullptr; 781} 782 783ElfW(Sym)* soinfo::find_symbol_by_address(const void* addr) { 784 return is_gnu_hash() ? gnu_addr_lookup(addr) : elf_addr_lookup(addr); 785} 786 787static bool symbol_matches_soaddr(const ElfW(Sym)* sym, ElfW(Addr) soaddr) { 788 return sym->st_shndx != SHN_UNDEF && 789 soaddr >= sym->st_value && 790 soaddr < sym->st_value + sym->st_size; 791} 792 793ElfW(Sym)* soinfo::gnu_addr_lookup(const void* addr) { 794 ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base; 795 796 for (size_t i = 0; i < nbucket_; ++i) { 797 uint32_t n = bucket_[i]; 798 799 if (n == 0) { 800 continue; 801 } 802 803 do { 804 ElfW(Sym)* sym = symtab_ + n; 805 if (symbol_matches_soaddr(sym, soaddr)) { 806 return sym; 807 } 808 } while ((chain_[n++] & 1) == 0); 809 } 810 811 return nullptr; 812} 813 814ElfW(Sym)* soinfo::elf_addr_lookup(const void* addr) { 815 ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - base; 816 817 // Search the library's symbol table for any defined symbol which 818 // contains this address. 819 for (size_t i = 0; i < nchain_; ++i) { 820 ElfW(Sym)* sym = symtab_ + i; 821 if (symbol_matches_soaddr(sym, soaddr)) { 822 return sym; 823 } 824 } 825 826 return nullptr; 827} 828 829static int open_library_on_path(const char* name, const char* const paths[]) { 830 char buf[512]; 831 for (size_t i = 0; paths[i] != nullptr; ++i) { 832 int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name); 833 if (n < 0 || n >= static_cast<int>(sizeof(buf))) { 834 PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name); 835 continue; 836 } 837 int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC)); 838 if (fd != -1) { 839 return fd; 840 } 841 } 842 return -1; 843} 844 845static int open_library(const char* name) { 846 TRACE("[ opening %s ]", name); 847 848 // If the name contains a slash, we should attempt to open it directly and not search the paths. 849 if (strchr(name, '/') != nullptr) { 850 int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC)); 851 if (fd != -1) { 852 return fd; 853 } 854 // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now. 855#if defined(__LP64__) 856 return -1; 857#endif 858 } 859 860 // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths. 
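  // For a bare name like "libfoo.so" the search order is:
  //   1. each directory from LD_LIBRARY_PATH (g_ld_library_paths, filled in by
  //      parse_LD_LIBRARY_PATH and updated via do_android_update_LD_LIBRARY_PATH
  //      when AT_SECURE is not set),
  //   2. the built-in defaults in kDefaultLdPaths: /vendor/lib64 then /system/lib64
  //      on LP64, /vendor/lib then /system/lib otherwise.
  // open_library_on_path() returns the fd of the first candidate that opens.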
861 int fd = open_library_on_path(name, g_ld_library_paths); 862 if (fd == -1) { 863 fd = open_library_on_path(name, kDefaultLdPaths); 864 } 865 return fd; 866} 867 868template<typename F> 869static void for_each_dt_needed(const soinfo* si, F action) { 870 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) { 871 if (d->d_tag == DT_NEEDED) { 872 action(si->get_string(d->d_un.d_val)); 873 } 874 } 875} 876 877static soinfo* load_library(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) { 878 int fd = -1; 879 off64_t file_offset = 0; 880 ScopedFd file_guard(-1); 881 882 if (extinfo != nullptr && (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) != 0) { 883 fd = extinfo->library_fd; 884 if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) { 885 file_offset = extinfo->library_fd_offset; 886 } 887 } else { 888 // Open the file. 889 fd = open_library(name); 890 if (fd == -1) { 891 DL_ERR("library \"%s\" not found", name); 892 return nullptr; 893 } 894 895 file_guard.reset(fd); 896 } 897 898 if ((file_offset % PAGE_SIZE) != 0) { 899 DL_ERR("file offset for the library \"%s\" is not page-aligned: %" PRId64, name, file_offset); 900 return nullptr; 901 } 902 if (file_offset < 0) { 903 DL_ERR("file offset for the library \"%s\" is negative: %" PRId64, name, file_offset); 904 return nullptr; 905 } 906 907 struct stat file_stat; 908 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) { 909 DL_ERR("unable to stat file for the library \"%s\": %s", name, strerror(errno)); 910 return nullptr; 911 } 912 if (file_offset >= file_stat.st_size) { 913 DL_ERR("file offset for the library \"%s\" >= file size: %" PRId64 " >= %" PRId64, name, file_offset, file_stat.st_size); 914 return nullptr; 915 } 916 917 // Check for symlink and other situations where 918 // file can have different names. 919 for (soinfo* si = solist; si != nullptr; si = si->next) { 920 if (si->get_st_dev() != 0 && 921 si->get_st_ino() != 0 && 922 si->get_st_dev() == file_stat.st_dev && 923 si->get_st_ino() == file_stat.st_ino && 924 si->get_file_offset() == file_offset) { 925 TRACE("library \"%s\" is already loaded under different name/path \"%s\" - will return existing soinfo", name, si->name); 926 return si; 927 } 928 } 929 930 if ((rtld_flags & RTLD_NOLOAD) != 0) { 931 DL_ERR("library \"%s\" wasn't loaded and RTLD_NOLOAD prevented it", name); 932 return nullptr; 933 } 934 935 // Read the ELF header and load the segments. 
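  // ElfReader (from linker_phdr) mmaps the PT_LOAD segments described by the
  // program header table, honoring the extinfo passed to Load(). The fields copied
  // into the soinfo below describe that mapping: load_start/load_size become
  // base/size, load_bias is the delta to add to link-time p_vaddr values, and the
  // loaded program headers are kept so prelink_image() can locate the dynamic
  // section and for_each_dt_needed() can queue the DT_NEEDED entries as LoadTasks.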
936 ElfReader elf_reader(name, fd, file_offset); 937 if (!elf_reader.Load(extinfo)) { 938 return nullptr; 939 } 940 941 soinfo* si = soinfo_alloc(SEARCH_NAME(name), &file_stat, file_offset, rtld_flags); 942 if (si == nullptr) { 943 return nullptr; 944 } 945 si->base = elf_reader.load_start(); 946 si->size = elf_reader.load_size(); 947 si->load_bias = elf_reader.load_bias(); 948 si->phnum = elf_reader.phdr_count(); 949 si->phdr = elf_reader.loaded_phdr(); 950 951 if (!si->prelink_image()) { 952 soinfo_free(si); 953 return nullptr; 954 } 955 956 for_each_dt_needed(si, [&] (const char* name) { 957 load_tasks.push_back(LoadTask::create(name, si)); 958 }); 959 960 return si; 961} 962 963static soinfo *find_loaded_library_by_name(const char* name) { 964 const char* search_name = SEARCH_NAME(name); 965 for (soinfo* si = solist; si != nullptr; si = si->next) { 966 if (!strcmp(search_name, si->name)) { 967 return si; 968 } 969 } 970 return nullptr; 971} 972 973static soinfo* find_library_internal(LoadTaskList& load_tasks, const char* name, int rtld_flags, const android_dlextinfo* extinfo) { 974 975 soinfo* si = find_loaded_library_by_name(name); 976 977 // Library might still be loaded, the accurate detection 978 // of this fact is done by load_library. 979 if (si == nullptr) { 980 TRACE("[ '%s' has not been found by name. Trying harder...]", name); 981 si = load_library(load_tasks, name, rtld_flags, extinfo); 982 } 983 984 return si; 985} 986 987static void soinfo_unload(soinfo* si); 988 989// TODO: this is slightly unusual way to construct 990// the global group for relocation. Not every RTLD_GLOBAL 991// library is included in this group for backwards-compatibility 992// reasons. 993// 994// This group consists of the main executable, LD_PRELOADs 995// and libraries with the DF_1_GLOBAL flag set. 996static soinfo::soinfo_list_t make_global_group() { 997 soinfo::soinfo_list_t global_group; 998 for (soinfo* si = somain; si != nullptr; si = si->next) { 999 if ((si->get_dt_flags_1() & DF_1_GLOBAL) != 0) { 1000 global_group.push_back(si); 1001 } 1002 } 1003 1004 return global_group; 1005} 1006 1007static bool find_libraries(soinfo* start_with, const char* const library_names[], size_t library_names_count, soinfo* soinfos[], 1008 soinfo* ld_preloads[], size_t ld_preloads_count, int rtld_flags, const android_dlextinfo* extinfo) { 1009 // Step 0: prepare. 1010 LoadTaskList load_tasks; 1011 for (size_t i = 0; i < library_names_count; ++i) { 1012 const char* name = library_names[i]; 1013 load_tasks.push_back(LoadTask::create(name, start_with)); 1014 } 1015 1016 // Construct global_group. 1017 soinfo::soinfo_list_t global_group = make_global_group(); 1018 1019 // If soinfos array is null allocate one on stack. 1020 // The array is needed in case of failure; for example 1021 // when library_names[] = {libone.so, libtwo.so} and libone.so 1022 // is loaded correctly but libtwo.so failed for some reason. 1023 // In this case libone.so should be unloaded on return. 1024 // See also implementation of failure_guard below. 1025 1026 if (soinfos == nullptr) { 1027 size_t soinfos_size = sizeof(soinfo*)*library_names_count; 1028 soinfos = reinterpret_cast<soinfo**>(alloca(soinfos_size)); 1029 memset(soinfos, 0, soinfos_size); 1030 } 1031 1032 // list of libraries to link - see step 2. 
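  // Shape of the rest of this function:
  //   Step 1 pops LoadTasks in FIFO order, so DT_NEEDED dependencies are loaded
  //          breadth-first; each new soinfo is recorded in soinfos[] and wired to
  //          its parent with add_child().
  //   Step 2 gathers the local group with walk_dependencies_tree() and calls
  //          link_image(global_group, local_group, extinfo) on every member that
  //          is not linked yet.
  //   The failure_guard defined next frees any unprocessed LoadTasks and unloads
  //   everything already recorded in soinfos[]; it is disabled once linking succeeds.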
1033 size_t soinfos_count = 0; 1034 1035 auto failure_guard = make_scope_guard([&]() { 1036 // Housekeeping 1037 load_tasks.for_each([] (LoadTask* t) { 1038 LoadTask::deleter(t); 1039 }); 1040 1041 for (size_t i = 0; i<soinfos_count; ++i) { 1042 soinfo_unload(soinfos[i]); 1043 } 1044 }); 1045 1046 // Step 1: load and pre-link all DT_NEEDED libraries in breadth first order. 1047 for (LoadTask::unique_ptr task(load_tasks.pop_front()); task.get() != nullptr; task.reset(load_tasks.pop_front())) { 1048 soinfo* si = find_library_internal(load_tasks, task->get_name(), rtld_flags, extinfo); 1049 if (si == nullptr) { 1050 return false; 1051 } 1052 1053 soinfo* needed_by = task->get_needed_by(); 1054 1055 if (needed_by != nullptr) { 1056 needed_by->add_child(si); 1057 } 1058 1059 if (si->is_linked()) { 1060 si->increment_ref_count(); 1061 } 1062 1063 // When ld_preloads is not null, the first 1064 // ld_preloads_count libs are in fact ld_preloads. 1065 if (ld_preloads != nullptr && soinfos_count < ld_preloads_count) { 1066 // Add LD_PRELOADed libraries to the global group for future runs. 1067 // There is no need to explicitly add them to the global group 1068 // for this run because they are going to appear in the local 1069 // group in the correct order. 1070 si->set_dt_flags_1(si->get_dt_flags_1() | DF_1_GLOBAL); 1071 ld_preloads[soinfos_count] = si; 1072 } 1073 1074 if (soinfos_count < library_names_count) { 1075 soinfos[soinfos_count++] = si; 1076 } 1077 } 1078 1079 // Step 2: link libraries. 1080 soinfo::soinfo_list_t local_group; 1081 walk_dependencies_tree( 1082 start_with == nullptr ? soinfos : &start_with, 1083 start_with == nullptr ? soinfos_count : 1, 1084 [&] (soinfo* si) { 1085 local_group.push_back(si); 1086 return true; 1087 }); 1088 1089 // We need to increment ref_count in case 1090 // the root of the local group was not linked. 1091 bool was_local_group_root_linked = local_group.front()->is_linked(); 1092 1093 bool linked = local_group.visit([&](soinfo* si) { 1094 if (!si->is_linked()) { 1095 if (!si->link_image(global_group, local_group, extinfo)) { 1096 return false; 1097 } 1098 si->set_linked(); 1099 } 1100 1101 return true; 1102 }); 1103 1104 if (linked) { 1105 failure_guard.disable(); 1106 } 1107 1108 if (!was_local_group_root_linked) { 1109 local_group.front()->increment_ref_count(); 1110 } 1111 1112 return linked; 1113} 1114 1115static soinfo* find_library(const char* name, int rtld_flags, const android_dlextinfo* extinfo) { 1116 soinfo* si; 1117 1118 if (name == nullptr) { 1119 si = somain; 1120 } else if (!find_libraries(nullptr, &name, 1, &si, nullptr, 0, rtld_flags, extinfo)) { 1121 return nullptr; 1122 } 1123 1124 return si; 1125} 1126 1127static void soinfo_unload(soinfo* root) { 1128 // Note that the library can be loaded but not linked; 1129 // in which case there is no root but we still need 1130 // to walk the tree and unload soinfos involved. 1131 // 1132 // This happens on unsuccessful dlopen, when one of 1133 // the DT_NEEDED libraries could not be linked/found. 1134 if (root->is_linked()) { 1135 root = root->get_local_group_root(); 1136 } 1137 1138 if (!root->can_unload()) { 1139 TRACE("not unloading '%s' - the binary is flagged with NODELETE", root->name); 1140 return; 1141 } 1142 1143 size_t ref_count = root->is_linked() ? 
root->decrement_ref_count() : 0; 1144 1145 if (ref_count == 0) { 1146 soinfo::soinfo_list_t local_unload_list; 1147 soinfo::soinfo_list_t external_unload_list; 1148 soinfo::soinfo_list_t depth_first_list; 1149 depth_first_list.push_back(root); 1150 soinfo* si = nullptr; 1151 1152 while ((si = depth_first_list.pop_front()) != nullptr) { 1153 if (local_unload_list.contains(si)) { 1154 continue; 1155 } 1156 1157 local_unload_list.push_back(si); 1158 1159 if (si->has_min_version(0)) { 1160 soinfo* child = nullptr; 1161 while ((child = si->get_children().pop_front()) != nullptr) { 1162 TRACE("%s@%p needs to unload %s@%p", si->name, si, child->name, child); 1163 if (local_unload_list.contains(child)) { 1164 continue; 1165 } else if (child->is_linked() && child->get_local_group_root() != root) { 1166 external_unload_list.push_back(child); 1167 } else { 1168 depth_first_list.push_front(child); 1169 } 1170 } 1171 } else { 1172#ifdef __LP64__ 1173 __libc_fatal("soinfo for \"%s\"@%p has no version", si->name, si); 1174#else 1175 PRINT("warning: soinfo for \"%s\"@%p has no version", si->name, si); 1176 for_each_dt_needed(si, [&] (const char* library_name) { 1177 TRACE("deprecated (old format of soinfo): %s needs to unload %s", si->name, library_name); 1178 soinfo* needed = find_library(library_name, RTLD_NOLOAD, nullptr); 1179 if (needed != nullptr) { 1180 // Not found: for example if symlink was deleted between dlopen and dlclose 1181 // Since we cannot really handle errors at this point - print and continue. 1182 PRINT("warning: couldn't find %s needed by %s on unload.", library_name, si->name); 1183 return; 1184 } else if (local_unload_list.contains(needed)) { 1185 // already visited 1186 return; 1187 } else if (needed->is_linked() && needed->get_local_group_root() != root) { 1188 // external group 1189 external_unload_list.push_back(needed); 1190 } else { 1191 // local group 1192 depth_first_list.push_front(needed); 1193 } 1194 }); 1195#endif 1196 } 1197 } 1198 1199 local_unload_list.for_each([](soinfo* si) { 1200 si->call_destructors(); 1201 }); 1202 1203 while ((si = local_unload_list.pop_front()) != nullptr) { 1204 notify_gdb_of_unload(si); 1205 soinfo_free(si); 1206 } 1207 1208 while ((si = external_unload_list.pop_front()) != nullptr) { 1209 soinfo_unload(si); 1210 } 1211 } else { 1212 TRACE("not unloading '%s' group, decrementing ref_count to %zd", root->name, ref_count); 1213 } 1214} 1215 1216void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) { 1217 // Use basic string manipulation calls to avoid snprintf. 1218 // snprintf indirectly calls pthread_getspecific to get the size of a buffer. 1219 // When debug malloc is enabled, this call returns 0. This in turn causes 1220 // snprintf to do nothing, which causes libraries to fail to load. 1221 // See b/17302493 for further details. 1222 // Once the above bug is fixed, this code can be modified to use 1223 // snprintf again. 
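  // The result is simply "<default0>:<default1>", so the caller's buffer must hold
  // both default paths plus one ':' and the trailing NUL. For LP64 that is
  //   strlen("/vendor/lib64") + strlen("/system/lib64") + 2 == 13 + 13 + 2 == 28,
  // i.e. exactly strlen("/vendor/lib64:/system/lib64") + 1.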
1224 size_t required_len = strlen(kDefaultLdPaths[0]) + strlen(kDefaultLdPaths[1]) + 2; 1225 if (buffer_size < required_len) { 1226 __libc_fatal("android_get_LD_LIBRARY_PATH failed, buffer too small: buffer len %zu, required len %zu", 1227 buffer_size, required_len); 1228 } 1229 char* end = stpcpy(buffer, kDefaultLdPaths[0]); 1230 *end = ':'; 1231 strcpy(end + 1, kDefaultLdPaths[1]); 1232} 1233 1234void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) { 1235 if (!get_AT_SECURE()) { 1236 parse_LD_LIBRARY_PATH(ld_library_path); 1237 } 1238} 1239 1240soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo) { 1241 if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL|RTLD_NODELETE|RTLD_NOLOAD)) != 0) { 1242 DL_ERR("invalid flags to dlopen: %x", flags); 1243 return nullptr; 1244 } 1245 if (extinfo != nullptr) { 1246 if ((extinfo->flags & ~(ANDROID_DLEXT_VALID_FLAG_BITS)) != 0) { 1247 DL_ERR("invalid extended flags to android_dlopen_ext: 0x%" PRIx64, extinfo->flags); 1248 return nullptr; 1249 } 1250 if ((extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD) == 0 && 1251 (extinfo->flags & ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET) != 0) { 1252 DL_ERR("invalid extended flag combination (ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET without ANDROID_DLEXT_USE_LIBRARY_FD): 0x%" PRIx64, extinfo->flags); 1253 return nullptr; 1254 } 1255 } 1256 protect_data(PROT_READ | PROT_WRITE); 1257 soinfo* si = find_library(name, flags, extinfo); 1258 if (si != nullptr) { 1259 si->call_constructors(); 1260 } 1261 protect_data(PROT_READ); 1262 return si; 1263} 1264 1265void do_dlclose(soinfo* si) { 1266 protect_data(PROT_READ | PROT_WRITE); 1267 soinfo_unload(si); 1268 protect_data(PROT_READ); 1269} 1270 1271static ElfW(Addr) call_ifunc_resolver(ElfW(Addr) resolver_addr) { 1272 typedef ElfW(Addr) (*ifunc_resolver_t)(void); 1273 ifunc_resolver_t ifunc_resolver = reinterpret_cast<ifunc_resolver_t>(resolver_addr); 1274 ElfW(Addr) ifunc_addr = ifunc_resolver(); 1275 TRACE_TYPE(RELO, "Called ifunc_resolver@%p. The result is %p", ifunc_resolver, reinterpret_cast<void*>(ifunc_addr)); 1276 1277 return ifunc_addr; 1278} 1279 1280#if defined(USE_RELA) 1281int soinfo::relocate(ElfW(Rela)* rela, unsigned count, const soinfo_list_t& global_group, const soinfo_list_t& local_group) { 1282 for (size_t idx = 0; idx < count; ++idx, ++rela) { 1283 unsigned type = ELFW(R_TYPE)(rela->r_info); 1284 unsigned sym = ELFW(R_SYM)(rela->r_info); 1285 ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + load_bias); 1286 ElfW(Addr) sym_addr = 0; 1287 const char* sym_name = nullptr; 1288 1289 DEBUG("Processing '%s' relocation at index %zd", name, idx); 1290 if (type == 0) { // R_*_NONE 1291 continue; 1292 } 1293 1294 ElfW(Sym)* s = nullptr; 1295 soinfo* lsi = nullptr; 1296 1297 if (sym != 0) { 1298 sym_name = get_string(symtab_[sym].st_name); 1299 s = soinfo_do_lookup(this, sym_name, &lsi, global_group,local_group); 1300 if (s == nullptr) { 1301 // We only allow an undefined symbol if this is a weak reference... 1302 s = &symtab_[sym]; 1303 if (ELF_ST_BIND(s->st_info) != STB_WEAK) { 1304 DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name); 1305 return -1; 1306 } 1307 1308 /* IHI0044C AAELF 4.5.1.1: 1309 1310 Libraries are not searched to resolve weak references. 1311 It is not an error for a weak reference to remain unsatisfied. 
1312 1313 During linking, the value of an undefined weak reference is: 1314 - Zero if the relocation type is absolute 1315 - The address of the place if the relocation is pc-relative 1316 - The address of nominal base address if the relocation 1317 type is base-relative. 1318 */ 1319 1320 switch (type) { 1321#if defined(__aarch64__) 1322 case R_AARCH64_JUMP_SLOT: 1323 case R_AARCH64_GLOB_DAT: 1324 case R_AARCH64_ABS64: 1325 case R_AARCH64_ABS32: 1326 case R_AARCH64_ABS16: 1327 case R_AARCH64_RELATIVE: 1328 case R_AARCH64_IRELATIVE: 1329 /* 1330 * The sym_addr was initialized to be zero above, or the relocation 1331 * code below does not care about value of sym_addr. 1332 * No need to do anything. 1333 */ 1334 break; 1335#elif defined(__x86_64__) 1336 case R_X86_64_JUMP_SLOT: 1337 case R_X86_64_GLOB_DAT: 1338 case R_X86_64_32: 1339 case R_X86_64_64: 1340 case R_X86_64_RELATIVE: 1341 case R_X86_64_IRELATIVE: 1342 // No need to do anything. 1343 break; 1344 case R_X86_64_PC32: 1345 sym_addr = reloc; 1346 break; 1347#endif 1348 default: 1349 DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx); 1350 return -1; 1351 } 1352 } else { 1353 // We got a definition. 1354 sym_addr = lsi->resolve_symbol_address(s); 1355 } 1356 count_relocation(kRelocSymbol); 1357 } 1358 1359 switch (type) { 1360#if defined(__aarch64__) 1361 case R_AARCH64_JUMP_SLOT: 1362 count_relocation(kRelocAbsolute); 1363 MARK(rela->r_offset); 1364 TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n", 1365 reloc, (sym_addr + rela->r_addend), sym_name); 1366 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend); 1367 break; 1368 case R_AARCH64_GLOB_DAT: 1369 count_relocation(kRelocAbsolute); 1370 MARK(rela->r_offset); 1371 TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n", 1372 reloc, (sym_addr + rela->r_addend), sym_name); 1373 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend); 1374 break; 1375 case R_AARCH64_ABS64: 1376 count_relocation(kRelocAbsolute); 1377 MARK(rela->r_offset); 1378 TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n", 1379 reloc, (sym_addr + rela->r_addend), sym_name); 1380 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend); 1381 break; 1382 case R_AARCH64_ABS32: 1383 count_relocation(kRelocAbsolute); 1384 MARK(rela->r_offset); 1385 TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n", 1386 reloc, (sym_addr + rela->r_addend), sym_name); 1387 if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) && 1388 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) { 1389 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend); 1390 } else { 1391 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx", 1392 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)), 1393 static_cast<ElfW(Addr)>(INT32_MIN), 1394 static_cast<ElfW(Addr)>(UINT32_MAX)); 1395 return -1; 1396 } 1397 break; 1398 case R_AARCH64_ABS16: 1399 count_relocation(kRelocAbsolute); 1400 MARK(rela->r_offset); 1401 TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n", 1402 reloc, (sym_addr + rela->r_addend), sym_name); 1403 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) && 1404 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) { 1405 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend); 1406 } else { 1407 
DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx", 1408 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)), 1409 static_cast<ElfW(Addr)>(INT16_MIN), 1410 static_cast<ElfW(Addr)>(UINT16_MAX)); 1411 return -1; 1412 } 1413 break; 1414 case R_AARCH64_PREL64: 1415 count_relocation(kRelocRelative); 1416 MARK(rela->r_offset); 1417 TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n", 1418 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name); 1419 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset; 1420 break; 1421 case R_AARCH64_PREL32: 1422 count_relocation(kRelocRelative); 1423 MARK(rela->r_offset); 1424 TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n", 1425 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name); 1426 if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) && 1427 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) { 1428 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset); 1429 } else { 1430 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx", 1431 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)), 1432 static_cast<ElfW(Addr)>(INT32_MIN), 1433 static_cast<ElfW(Addr)>(UINT32_MAX)); 1434 return -1; 1435 } 1436 break; 1437 case R_AARCH64_PREL16: 1438 count_relocation(kRelocRelative); 1439 MARK(rela->r_offset); 1440 TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n", 1441 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name); 1442 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) && 1443 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) { 1444 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset); 1445 } else { 1446 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx", 1447 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)), 1448 static_cast<ElfW(Addr)>(INT16_MIN), 1449 static_cast<ElfW(Addr)>(UINT16_MAX)); 1450 return -1; 1451 } 1452 break; 1453 1454 case R_AARCH64_RELATIVE: 1455 count_relocation(kRelocRelative); 1456 MARK(rela->r_offset); 1457 if (sym) { 1458 DL_ERR("odd RELATIVE form..."); 1459 return -1; 1460 } 1461 TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n", 1462 reloc, (base + rela->r_addend)); 1463 *reinterpret_cast<ElfW(Addr)*>(reloc) = (base + rela->r_addend); 1464 break; 1465 1466 case R_AARCH64_IRELATIVE: 1467 count_relocation(kRelocRelative); 1468 MARK(rela->r_offset); 1469 TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend)); 1470 *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend); 1471 break; 1472 1473 case R_AARCH64_COPY: 1474 /* 1475 * ET_EXEC is not supported so this should not happen. 1476 * 1477 * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf 1478 * 1479 * Section 4.7.1.10 "Dynamic relocations" 1480 * R_AARCH64_COPY may only appear in executable objects where e_type is 1481 * set to ET_EXEC. 
1482 */ 1483 DL_ERR("%s R_AARCH64_COPY relocations are not supported", name); 1484 return -1; 1485 case R_AARCH64_TLS_TPREL64: 1486 TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n", 1487 reloc, (sym_addr + rela->r_addend), rela->r_offset); 1488 break; 1489 case R_AARCH64_TLS_DTPREL32: 1490 TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n", 1491 reloc, (sym_addr + rela->r_addend), rela->r_offset); 1492 break; 1493#elif defined(__x86_64__) 1494 case R_X86_64_JUMP_SLOT: 1495 count_relocation(kRelocAbsolute); 1496 MARK(rela->r_offset); 1497 TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc), 1498 static_cast<size_t>(sym_addr + rela->r_addend), sym_name); 1499 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend; 1500 break; 1501 case R_X86_64_GLOB_DAT: 1502 count_relocation(kRelocAbsolute); 1503 MARK(rela->r_offset); 1504 TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc), 1505 static_cast<size_t>(sym_addr + rela->r_addend), sym_name); 1506 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend; 1507 break; 1508 case R_X86_64_RELATIVE: 1509 count_relocation(kRelocRelative); 1510 MARK(rela->r_offset); 1511 if (sym) { 1512 DL_ERR("odd RELATIVE form..."); 1513 return -1; 1514 } 1515 TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc), 1516 static_cast<size_t>(base)); 1517 *reinterpret_cast<ElfW(Addr)*>(reloc) = base + rela->r_addend; 1518 break; 1519 case R_X86_64_IRELATIVE: 1520 count_relocation(kRelocRelative); 1521 MARK(rela->r_offset); 1522 TRACE_TYPE(RELO, "RELO IRELATIVE %16llx <- %16llx\n", reloc, (base + rela->r_addend)); 1523 *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + rela->r_addend); 1524 break; 1525 case R_X86_64_32: 1526 count_relocation(kRelocRelative); 1527 MARK(rela->r_offset); 1528 TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc), 1529 static_cast<size_t>(sym_addr), sym_name); 1530 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend; 1531 break; 1532 case R_X86_64_64: 1533 count_relocation(kRelocRelative); 1534 MARK(rela->r_offset); 1535 TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc), 1536 static_cast<size_t>(sym_addr), sym_name); 1537 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend; 1538 break; 1539 case R_X86_64_PC32: 1540 count_relocation(kRelocRelative); 1541 MARK(rela->r_offset); 1542 TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s", 1543 static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc), 1544 static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name); 1545 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc; 1546 break; 1547#endif 1548 1549 default: 1550 DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx); 1551 return -1; 1552 } 1553 } 1554 return 0; 1555} 1556 1557#else // REL, not RELA. 1558int soinfo::relocate(ElfW(Rel)* rel, unsigned count, const soinfo_list_t& global_group, const soinfo_list_t& local_group) { 1559 for (size_t idx = 0; idx < count; ++idx, ++rel) { 1560 unsigned type = ELFW(R_TYPE)(rel->r_info); 1561 // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead. 
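    // Unlike the RELA form above, REL entries carry no explicit addend: the addend
    // is whatever value already sits at the relocation target. That is why the
    // cases below add into *reloc (e.g. R_ARM_ABS32 and R_386_32 use "+=") where
    // the RELA path stores "sym_addr + rela->r_addend" directly.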
1562 unsigned sym = ELFW(R_SYM)(rel->r_info); 1563 ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + load_bias); 1564 ElfW(Addr) sym_addr = 0; 1565 const char* sym_name = nullptr; 1566 1567 DEBUG("Processing '%s' relocation at index %zd", name, idx); 1568 if (type == 0) { // R_*_NONE 1569 continue; 1570 } 1571 1572 ElfW(Sym)* s = nullptr; 1573 soinfo* lsi = nullptr; 1574 1575 if (sym != 0) { 1576 sym_name = get_string(symtab_[sym].st_name); 1577 s = soinfo_do_lookup(this, sym_name, &lsi, global_group, local_group); 1578 if (s == nullptr) { 1579 // We only allow an undefined symbol if this is a weak reference... 1580 s = &symtab_[sym]; 1581 if (ELF_ST_BIND(s->st_info) != STB_WEAK) { 1582 DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, name); 1583 return -1; 1584 } 1585 1586 /* IHI0044C AAELF 4.5.1.1: 1587 1588 Libraries are not searched to resolve weak references. 1589 It is not an error for a weak reference to remain 1590 unsatisfied. 1591 1592 During linking, the value of an undefined weak reference is: 1593 - Zero if the relocation type is absolute 1594 - The address of the place if the relocation is pc-relative 1595 - The address of nominal base address if the relocation 1596 type is base-relative. 1597 */ 1598 1599 switch (type) { 1600#if defined(__arm__) 1601 case R_ARM_JUMP_SLOT: 1602 case R_ARM_GLOB_DAT: 1603 case R_ARM_ABS32: 1604 case R_ARM_RELATIVE: /* Don't care. */ 1605 // sym_addr was initialized to be zero above or relocation 1606 // code below does not care about value of sym_addr. 1607 // No need to do anything. 1608 break; 1609#elif defined(__i386__) 1610 case R_386_JMP_SLOT: 1611 case R_386_GLOB_DAT: 1612 case R_386_32: 1613 case R_386_RELATIVE: /* Don't care. */ 1614 case R_386_IRELATIVE: 1615 // sym_addr was initialized to be zero above or relocation 1616 // code below does not care about value of sym_addr. 1617 // No need to do anything. 1618 break; 1619 case R_386_PC32: 1620 sym_addr = reloc; 1621 break; 1622#endif 1623 1624#if defined(__arm__) 1625 case R_ARM_COPY: 1626 // Fall through. Can't really copy if weak symbol is not found at run-time. 1627#endif 1628 default: 1629 DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx); 1630 return -1; 1631 } 1632 } else { 1633 // We got a definition. 1634 sym_addr = lsi->resolve_symbol_address(s); 1635 } 1636 count_relocation(kRelocSymbol); 1637 } 1638 1639 switch (type) { 1640#if defined(__arm__) 1641 case R_ARM_JUMP_SLOT: 1642 count_relocation(kRelocAbsolute); 1643 MARK(rel->r_offset); 1644 TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name); 1645 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr; 1646 break; 1647 case R_ARM_GLOB_DAT: 1648 count_relocation(kRelocAbsolute); 1649 MARK(rel->r_offset); 1650 TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name); 1651 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr; 1652 break; 1653 case R_ARM_ABS32: 1654 count_relocation(kRelocAbsolute); 1655 MARK(rel->r_offset); 1656 TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name); 1657 *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr; 1658 break; 1659 case R_ARM_REL32: 1660 count_relocation(kRelocRelative); 1661 MARK(rel->r_offset); 1662 TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s", 1663 reloc, sym_addr, rel->r_offset, sym_name); 1664 *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset; 1665 break; 1666 case R_ARM_COPY: 1667 /* 1668 * ET_EXEC is not supported so this should not happen. 
1669 * 1670 * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf 1671 * 1672 * Section 4.7.1.10 "Dynamic relocations" 1673 * R_ARM_COPY may only appear in executable objects where e_type is 1674 * set to ET_EXEC. 1675 */ 1676 DL_ERR("%s R_ARM_COPY relocations are not supported", name); 1677 return -1; 1678#elif defined(__i386__) 1679 case R_386_JMP_SLOT: 1680 count_relocation(kRelocAbsolute); 1681 MARK(rel->r_offset); 1682 TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name); 1683 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr; 1684 break; 1685 case R_386_GLOB_DAT: 1686 count_relocation(kRelocAbsolute); 1687 MARK(rel->r_offset); 1688 TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name); 1689 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr; 1690 break; 1691 case R_386_32: 1692 count_relocation(kRelocRelative); 1693 MARK(rel->r_offset); 1694 TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name); 1695 *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr; 1696 break; 1697 case R_386_PC32: 1698 count_relocation(kRelocRelative); 1699 MARK(rel->r_offset); 1700 TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s", 1701 reloc, (sym_addr - reloc), sym_addr, reloc, sym_name); 1702 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc); 1703 break; 1704#elif defined(__mips__) 1705 case R_MIPS_REL32: 1706#if defined(__LP64__) 1707 // MIPS Elf64_Rel entries contain compound relocations 1708 // We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case 1709 if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 || 1710 ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) { 1711 DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)", 1712 type, (unsigned)ELF64_R_TYPE2(rel->r_info), 1713 (unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx); 1714 return -1; 1715 } 1716#endif 1717 count_relocation(kRelocAbsolute); 1718 MARK(rel->r_offset); 1719 TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc), 1720 static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*"); 1721 if (s) { 1722 *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr; 1723 } else { 1724 *reinterpret_cast<ElfW(Addr)*>(reloc) += base; 1725 } 1726 break; 1727#endif 1728 1729#if defined(__arm__) 1730 case R_ARM_RELATIVE: 1731#elif defined(__i386__) 1732 case R_386_RELATIVE: 1733#endif 1734 count_relocation(kRelocRelative); 1735 MARK(rel->r_offset); 1736 if (sym) { 1737 DL_ERR("odd RELATIVE form..."); 1738 return -1; 1739 } 1740 TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p", 1741 reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base)); 1742 *reinterpret_cast<ElfW(Addr)*>(reloc) += base; 1743 break; 1744#if defined(__i386__) 1745 case R_386_IRELATIVE: 1746 count_relocation(kRelocRelative); 1747 MARK(rel->r_offset); 1748 TRACE_TYPE(RELO, "RELO IRELATIVE %p <- %p", reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(base)); 1749 *reinterpret_cast<ElfW(Addr)*>(reloc) = call_ifunc_resolver(base + *reinterpret_cast<ElfW(Addr)*>(reloc)); 1750 break; 1751#endif 1752 1753 default: 1754 DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx); 1755 return -1; 1756 } 1757 } 1758 return 0; 1759} 1760#endif 1761 1762#if defined(__mips__) 1763bool soinfo::mips_relocate_got(const soinfo_list_t& global_group, const soinfo_list_t& local_group) { 1764 ElfW(Addr)** got = plt_got_; 1765 if (got == nullptr) { 1766 return true; 1767 } 1768 1769 // got[0] is the address of the lazy resolver function. 
1770 // got[1] may be used for a GNU extension. 1771 // Set it to a recognizable address in case someone calls it (should be _rtld_bind_start). 1772 // FIXME: maybe this should be in a separate routine? 1773 if ((flags_ & FLAG_LINKER) == 0) { 1774 size_t g = 0; 1775 got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef); 1776 if (reinterpret_cast<intptr_t>(got[g]) < 0) { 1777 got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed); 1778 } 1779 // Relocate the local GOT entries. 1780 for (; g < mips_local_gotno_; g++) { 1781 got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + load_bias); 1782 } 1783 } 1784 1785 // Now for the global GOT entries... 1786 ElfW(Sym)* sym = symtab_ + mips_gotsym_; 1787 got = plt_got_ + mips_local_gotno_; 1788 for (size_t g = mips_gotsym_; g < mips_symtabno_; g++, sym++, got++) { 1789 // This is an undefined reference... try to locate it. 1790 const char* sym_name = get_string(sym->st_name); 1791 soinfo* lsi = nullptr; 1792 ElfW(Sym)* s = soinfo_do_lookup(this, sym_name, &lsi, global_group, local_group); 1793 if (s == nullptr) { 1794 // We only allow an undefined symbol if this is a weak reference. 1795 s = &symtab_[g]; 1796 if (ELF_ST_BIND(s->st_info) != STB_WEAK) { 1797 DL_ERR("cannot locate \"%s\"...", sym_name); 1798 return false; 1799 } 1800 *got = 0; 1801 } else { 1802 // FIXME: is this sufficient? 1803 // For reference see NetBSD link loader 1804 // http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup 1805 *got = reinterpret_cast<ElfW(Addr)*>(lsi->resolve_symbol_address(s)); 1806 } 1807 } 1808 return true; 1809} 1810#endif 1811 1812void soinfo::call_array(const char* array_name __unused, linker_function_t* functions, size_t count, bool reverse) { 1813 if (functions == nullptr) { 1814 return; 1815 } 1816 1817 TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name); 1818 1819 int begin = reverse ? (count - 1) : 0; 1820 int end = reverse ? -1 : count; 1821 int step = reverse ? -1 : 1; 1822 1823 for (int i = begin; i != end; i += step) { 1824 TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]); 1825 call_function("function", functions[i]); 1826 } 1827 1828 TRACE("[ Done calling %s for '%s' ]", array_name, name); 1829} 1830 1831void soinfo::call_function(const char* function_name __unused, linker_function_t function) { 1832 if (function == nullptr || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) { 1833 return; 1834 } 1835 1836 TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name); 1837 function(); 1838 TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name); 1839 1840 // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures 1841 // are still writable. This happens with our debug malloc (see http://b/7941716). 1842 protect_data(PROT_READ | PROT_WRITE); 1843} 1844 1845void soinfo::call_pre_init_constructors() { 1846 // DT_PREINIT_ARRAY functions are called before any other constructors for executables, 1847 // but ignored in a shared library. 1848 call_array("DT_PREINIT_ARRAY", preinit_array_, preinit_array_count_, false); 1849} 1850 1851void soinfo::call_constructors() { 1852 if (constructors_called) { 1853 return; 1854 } 1855 1856 // We set constructors_called before actually calling the constructors, otherwise it doesn't 1857 // protect against recursive constructor calls. 
One simple example of constructor recursion 1858 // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so: 1859 // 1. The program depends on libc, so libc's constructor is called here. 1860 // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so. 1861 // 3. dlopen() calls the constructors on the newly created 1862 // soinfo for libc_malloc_debug_leak.so. 1863 // 4. The debug .so depends on libc, so CallConstructors is 1864 // called again with the libc soinfo. If it doesn't trigger the early- 1865 // out above, the libc constructor will be called again (recursively!). 1866 constructors_called = true; 1867 1868 if (!is_main_executable() && preinit_array_ != nullptr) { 1869 // The GNU dynamic linker silently ignores these, but we warn the developer. 1870 PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!", 1871 name, preinit_array_count_); 1872 } 1873 1874 get_children().for_each([] (soinfo* si) { 1875 si->call_constructors(); 1876 }); 1877 1878 TRACE("\"%s\": calling constructors", name); 1879 1880 // DT_INIT should be called before DT_INIT_ARRAY if both are present. 1881 call_function("DT_INIT", init_func_); 1882 call_array("DT_INIT_ARRAY", init_array_, init_array_count_, false); 1883} 1884 1885void soinfo::call_destructors() { 1886 if (!constructors_called) { 1887 return; 1888 } 1889 TRACE("\"%s\": calling destructors", name); 1890 1891 // DT_FINI_ARRAY must be parsed in reverse order. 1892 call_array("DT_FINI_ARRAY", fini_array_, fini_array_count_, true); 1893 1894 // DT_FINI should be called after DT_FINI_ARRAY if both are present. 1895 call_function("DT_FINI", fini_func_); 1896 1897 // This is needed on second call to dlopen 1898 // after library has been unloaded with RTLD_NODELETE 1899 constructors_called = false; 1900} 1901 1902void soinfo::add_child(soinfo* child) { 1903 if (has_min_version(0)) { 1904 child->parents_.push_back(this); 1905 this->children_.push_back(child); 1906 } 1907} 1908 1909void soinfo::remove_all_links() { 1910 if (!has_min_version(0)) { 1911 return; 1912 } 1913 1914 // 1. Untie connected soinfos from 'this'. 1915 children_.for_each([&] (soinfo* child) { 1916 child->parents_.remove_if([&] (const soinfo* parent) { 1917 return parent == this; 1918 }); 1919 }); 1920 1921 parents_.for_each([&] (soinfo* parent) { 1922 parent->children_.remove_if([&] (const soinfo* child) { 1923 return child == this; 1924 }); 1925 }); 1926 1927 // 2. Once everything untied - clear local lists. 
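// (For example: when B is unloaded from a chain A -> B -> C, step 1 above removes
// B from A's children_ and from C's parents_, and clearing B's own lists below
// leaves it fully detached from the dependency graph.)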
1928 parents_.clear(); 1929 children_.clear(); 1930} 1931 1932dev_t soinfo::get_st_dev() const { 1933 if (has_min_version(0)) { 1934 return st_dev_; 1935 } 1936 1937 return 0; 1938}; 1939 1940ino_t soinfo::get_st_ino() const { 1941 if (has_min_version(0)) { 1942 return st_ino_; 1943 } 1944 1945 return 0; 1946} 1947 1948off64_t soinfo::get_file_offset() const { 1949 if (has_min_version(1)) { 1950 return file_offset_; 1951 } 1952 1953 return 0; 1954} 1955 1956uint32_t soinfo::get_rtld_flags() const { 1957 if (has_min_version(1)) { 1958 return rtld_flags_; 1959 } 1960 1961 return 0; 1962} 1963 1964uint32_t soinfo::get_dt_flags_1() const { 1965 if (has_min_version(1)) { 1966 return dt_flags_1_; 1967 } 1968 1969 return 0; 1970} 1971void soinfo::set_dt_flags_1(uint32_t dt_flags_1) { 1972 if (has_min_version(1)) { 1973 if ((dt_flags_1 & DF_1_GLOBAL) != 0) { 1974 rtld_flags_ |= RTLD_GLOBAL; 1975 } 1976 1977 if ((dt_flags_1 & DF_1_NODELETE) != 0) { 1978 rtld_flags_ |= RTLD_NODELETE; 1979 } 1980 1981 dt_flags_1_ = dt_flags_1; 1982 } 1983} 1984 1985// This is a return on get_children()/get_parents() if 1986// 'this->flags' does not have FLAG_NEW_SOINFO set. 1987static soinfo::soinfo_list_t g_empty_list; 1988 1989soinfo::soinfo_list_t& soinfo::get_children() { 1990 if (has_min_version(0)) { 1991 return children_; 1992 } 1993 1994 return g_empty_list; 1995} 1996 1997soinfo::soinfo_list_t& soinfo::get_parents() { 1998 if (has_min_version(0)) { 1999 return parents_; 2000 } 2001 2002 return g_empty_list; 2003} 2004 2005ElfW(Addr) soinfo::resolve_symbol_address(ElfW(Sym)* s) { 2006 if (ELF_ST_TYPE(s->st_info) == STT_GNU_IFUNC) { 2007 return call_ifunc_resolver(s->st_value + load_bias); 2008 } 2009 2010 return static_cast<ElfW(Addr)>(s->st_value + load_bias); 2011} 2012 2013const char* soinfo::get_string(ElfW(Word) index) const { 2014 if (has_min_version(1) && (index >= strtab_size_)) { 2015 __libc_fatal("%s: strtab out of bounds error; STRSZ=%zd, name=%d", name, strtab_size_, index); 2016 } 2017 2018 return strtab_ + index; 2019} 2020 2021bool soinfo::is_gnu_hash() const { 2022 return (flags_ & FLAG_GNU_HASH) != 0; 2023} 2024 2025bool soinfo::can_unload() const { 2026 return (get_rtld_flags() & (RTLD_NODELETE | RTLD_GLOBAL)) == 0; 2027} 2028 2029bool soinfo::is_linked() const { 2030 return (flags_ & FLAG_LINKED) != 0; 2031} 2032 2033bool soinfo::is_main_executable() const { 2034 return (flags_ & FLAG_EXE) != 0; 2035} 2036 2037void soinfo::set_linked() { 2038 flags_ |= FLAG_LINKED; 2039} 2040 2041void soinfo::set_linker_flag() { 2042 flags_ |= FLAG_LINKER; 2043} 2044 2045void soinfo::set_main_executable() { 2046 flags_ |= FLAG_EXE; 2047} 2048 2049void soinfo::increment_ref_count() { 2050 local_group_root_->ref_count_++; 2051} 2052 2053size_t soinfo::decrement_ref_count() { 2054 return --local_group_root_->ref_count_; 2055} 2056 2057soinfo* soinfo::get_local_group_root() const { 2058 return local_group_root_; 2059} 2060 2061/* Force any of the closed stdin, stdout and stderr to be associated with 2062 /dev/null. */ 2063static int nullify_closed_stdio() { 2064 int dev_null, i, status; 2065 int return_value = 0; 2066 2067 dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR)); 2068 if (dev_null < 0) { 2069 DL_ERR("cannot open /dev/null: %s", strerror(errno)); 2070 return -1; 2071 } 2072 TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null); 2073 2074 /* If any of the stdio file descriptors is valid and not associated 2075 with /dev/null, dup /dev/null to it. 
*/ 2076 for (i = 0; i < 3; i++) { 2077 /* If it is /dev/null already, we are done. */ 2078 if (i == dev_null) { 2079 continue; 2080 } 2081 2082 TRACE("[ Nullifying stdio file descriptor %d]", i); 2083 status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL)); 2084 2085 /* If file is opened, we are good. */ 2086 if (status != -1) { 2087 continue; 2088 } 2089 2090 /* The only error we allow is that the file descriptor does not 2091 exist, in which case we dup /dev/null to it. */ 2092 if (errno != EBADF) { 2093 DL_ERR("fcntl failed: %s", strerror(errno)); 2094 return_value = -1; 2095 continue; 2096 } 2097 2098 /* Try dupping /dev/null to this stdio file descriptor and 2099 repeat if there is a signal. Note that any errors in closing 2100 the stdio descriptor are lost. */ 2101 status = TEMP_FAILURE_RETRY(dup2(dev_null, i)); 2102 if (status < 0) { 2103 DL_ERR("dup2 failed: %s", strerror(errno)); 2104 return_value = -1; 2105 continue; 2106 } 2107 } 2108 2109 /* If /dev/null is not one of the stdio file descriptors, close it. */ 2110 if (dev_null > 2) { 2111 TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null); 2112 status = TEMP_FAILURE_RETRY(close(dev_null)); 2113 if (status == -1) { 2114 DL_ERR("close failed: %s", strerror(errno)); 2115 return_value = -1; 2116 } 2117 } 2118 2119 return return_value; 2120} 2121 2122bool soinfo::prelink_image() { 2123 /* Extract dynamic section */ 2124 ElfW(Word) dynamic_flags = 0; 2125 phdr_table_get_dynamic_section(phdr, phnum, load_bias, &dynamic, &dynamic_flags); 2126 2127 /* We can't log anything until the linker is relocated */ 2128 bool relocating_linker = (flags_ & FLAG_LINKER) != 0; 2129 if (!relocating_linker) { 2130 INFO("[ linking %s ]", name); 2131 DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(base), flags_); 2132 } 2133 2134 if (dynamic == nullptr) { 2135 if (!relocating_linker) { 2136 DL_ERR("missing PT_DYNAMIC in \"%s\"", name); 2137 } 2138 return false; 2139 } else { 2140 if (!relocating_linker) { 2141 DEBUG("dynamic = %p", dynamic); 2142 } 2143 } 2144 2145#if defined(__arm__) 2146 (void) phdr_table_get_arm_exidx(phdr, phnum, load_bias, 2147 &ARM_exidx, &ARM_exidx_count); 2148#endif 2149 2150 // Extract useful information from dynamic section. 2151 uint32_t needed_count = 0; 2152 for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) { 2153 DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p", 2154 d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val)); 2155 switch (d->d_tag) { 2156 case DT_SONAME: 2157 // TODO: glibc dynamic linker uses this name for 2158 // initial library lookup; consider doing the same here. 
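// (For reference, the glibc loader records the DT_SONAME string and checks it
// against the sonames of already-loaded objects before mapping a new file, so a
// dlopen() by soname can be satisfied without hitting the filesystem again.)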
2159 break; 2160 2161 case DT_HASH: 2162 if (nbucket_ != 0) { 2163 // in case of --hash-style=both, we prefer gnu 2164 break; 2165 } 2166 2167 nbucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0]; 2168 nchain_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1]; 2169 bucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8); 2170 chain_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr + 8 + nbucket_ * 4); 2171 break; 2172 2173 case DT_GNU_HASH: 2174 if (nbucket_ != 0) { 2175 // in case of --hash-style=both, we prefer gnu 2176 nchain_ = 0; 2177 } 2178 2179 nbucket_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[0]; 2180 // skip symndx 2181 gnu_maskwords_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[2]; 2182 gnu_shift2_ = reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[3]; 2183 2184 gnu_bloom_filter_ = reinterpret_cast<ElfW(Addr)*>(load_bias + d->d_un.d_ptr + 16); 2185 bucket_ = reinterpret_cast<uint32_t*>(gnu_bloom_filter_ + gnu_maskwords_); 2186 // amend chain for symndx = header[1] 2187 chain_ = bucket_ + nbucket_ - reinterpret_cast<uint32_t*>(load_bias + d->d_un.d_ptr)[1]; 2188 2189 if (!powerof2(gnu_maskwords_)) { 2190 DL_ERR("invalid maskwords for gnu_hash = 0x%x, in \"%s\" expecting power to two", gnu_maskwords_, name); 2191 return false; 2192 } 2193 --gnu_maskwords_; 2194 2195 flags_ |= FLAG_GNU_HASH; 2196 break; 2197 2198 case DT_STRTAB: 2199 strtab_ = reinterpret_cast<const char*>(load_bias + d->d_un.d_ptr); 2200 break; 2201 2202 case DT_STRSZ: 2203 strtab_size_ = d->d_un.d_val; 2204 break; 2205 2206 case DT_SYMTAB: 2207 symtab_ = reinterpret_cast<ElfW(Sym)*>(load_bias + d->d_un.d_ptr); 2208 break; 2209 2210 case DT_SYMENT: 2211 if (d->d_un.d_val != sizeof(ElfW(Sym))) { 2212 DL_ERR("invalid DT_SYMENT: %zd in \"%s\"", static_cast<size_t>(d->d_un.d_val), name); 2213 return false; 2214 } 2215 break; 2216 2217 case DT_PLTREL: 2218#if defined(USE_RELA) 2219 if (d->d_un.d_val != DT_RELA) { 2220 DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_RELA", name); 2221 return false; 2222 } 2223#else 2224 if (d->d_un.d_val != DT_REL) { 2225 DL_ERR("unsupported DT_PLTREL in \"%s\"; expected DT_REL", name); 2226 return false; 2227 } 2228#endif 2229 break; 2230 2231 case DT_JMPREL: 2232#if defined(USE_RELA) 2233 plt_rela_ = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr); 2234#else 2235 plt_rel_ = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr); 2236#endif 2237 break; 2238 2239 case DT_PLTRELSZ: 2240#if defined(USE_RELA) 2241 plt_rela_count_ = d->d_un.d_val / sizeof(ElfW(Rela)); 2242#else 2243 plt_rel_count_ = d->d_un.d_val / sizeof(ElfW(Rel)); 2244#endif 2245 break; 2246 2247 case DT_PLTGOT: 2248#if defined(__mips__) 2249 // Used by mips and mips64. 2250 plt_got_ = reinterpret_cast<ElfW(Addr)**>(load_bias + d->d_un.d_ptr); 2251#endif 2252 // Ignore for other platforms... (because RTLD_LAZY is not supported) 2253 break; 2254 2255 case DT_DEBUG: 2256 // Set the DT_DEBUG entry to the address of _r_debug for GDB 2257 // if the dynamic table is writable 2258// FIXME: not working currently for N64 2259// The flags for the LOAD and DYNAMIC program headers do not agree. 2260// The LOAD section containing the dynamic table has been mapped as 2261// read-only, but the DYNAMIC header claims it is writable. 
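// (Sketch of the handshake this enables: a debugger locates DT_DEBUG in the
// inferior's PT_DYNAMIC, follows it to the r_debug structure, walks r_map to
// enumerate loaded objects and puts a breakpoint on r_brk so it is notified
// whenever that list changes.)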
2262#if !(defined(__mips__) && defined(__LP64__)) 2263 if ((dynamic_flags & PF_W) != 0) { 2264 d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug); 2265 } 2266 break; 2267#endif 2268#if defined(USE_RELA) 2269 case DT_RELA: 2270 rela_ = reinterpret_cast<ElfW(Rela)*>(load_bias + d->d_un.d_ptr); 2271 break; 2272 2273 case DT_RELASZ: 2274 rela_count_ = d->d_un.d_val / sizeof(ElfW(Rela)); 2275 break; 2276 2277 case DT_RELAENT: 2278 if (d->d_un.d_val != sizeof(ElfW(Rela))) { 2279 DL_ERR("invalid DT_RELAENT: %zd", static_cast<size_t>(d->d_un.d_val)); 2280 return false; 2281 } 2282 break; 2283 2284 // ignored (see DT_RELCOUNT comments for details) 2285 case DT_RELACOUNT: 2286 break; 2287 2288 case DT_REL: 2289 DL_ERR("unsupported DT_REL in \"%s\"", name); 2290 return false; 2291 2292 case DT_RELSZ: 2293 DL_ERR("unsupported DT_RELSZ in \"%s\"", name); 2294 return false; 2295#else 2296 case DT_REL: 2297 rel_ = reinterpret_cast<ElfW(Rel)*>(load_bias + d->d_un.d_ptr); 2298 break; 2299 2300 case DT_RELSZ: 2301 rel_count_ = d->d_un.d_val / sizeof(ElfW(Rel)); 2302 break; 2303 2304 case DT_RELENT: 2305 if (d->d_un.d_val != sizeof(ElfW(Rel))) { 2306 DL_ERR("invalid DT_RELENT: %zd", static_cast<size_t>(d->d_un.d_val)); 2307 return false; 2308 } 2309 break; 2310 2311 // "Indicates that all RELATIVE relocations have been concatenated together, 2312 // and specifies the RELATIVE relocation count." 2313 // 2314 // TODO: Spec also mentions that this can be used to optimize relocation process; 2315 // Not currently used by bionic linker - ignored. 2316 case DT_RELCOUNT: 2317 break; 2318 case DT_RELA: 2319 DL_ERR("unsupported DT_RELA in \"%s\"", name); 2320 return false; 2321#endif 2322 case DT_INIT: 2323 init_func_ = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr); 2324 DEBUG("%s constructors (DT_INIT) found at %p", name, init_func_); 2325 break; 2326 2327 case DT_FINI: 2328 fini_func_ = reinterpret_cast<linker_function_t>(load_bias + d->d_un.d_ptr); 2329 DEBUG("%s destructors (DT_FINI) found at %p", name, fini_func_); 2330 break; 2331 2332 case DT_INIT_ARRAY: 2333 init_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr); 2334 DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", name, init_array_); 2335 break; 2336 2337 case DT_INIT_ARRAYSZ: 2338 init_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr)); 2339 break; 2340 2341 case DT_FINI_ARRAY: 2342 fini_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr); 2343 DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", name, fini_array_); 2344 break; 2345 2346 case DT_FINI_ARRAYSZ: 2347 fini_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr)); 2348 break; 2349 2350 case DT_PREINIT_ARRAY: 2351 preinit_array_ = reinterpret_cast<linker_function_t*>(load_bias + d->d_un.d_ptr); 2352 DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", name, preinit_array_); 2353 break; 2354 2355 case DT_PREINIT_ARRAYSZ: 2356 preinit_array_count_ = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr)); 2357 break; 2358 2359 case DT_TEXTREL: 2360#if defined(__LP64__) 2361 DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", name); 2362 return false; 2363#else 2364 has_text_relocations = true; 2365 break; 2366#endif 2367 2368 case DT_SYMBOLIC: 2369 has_DT_SYMBOLIC = true; 2370 break; 2371 2372 case DT_NEEDED: 2373 ++needed_count; 2374 break; 2375 2376 case DT_FLAGS: 2377 if (d->d_un.d_val & DF_TEXTREL) { 2378#if defined(__LP64__) 2379 DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file 
\"%s\"", name); 2380 return false; 2381#else 2382 has_text_relocations = true; 2383#endif 2384 } 2385 if (d->d_un.d_val & DF_SYMBOLIC) { 2386 has_DT_SYMBOLIC = true; 2387 } 2388 break; 2389 2390 case DT_FLAGS_1: 2391 set_dt_flags_1(d->d_un.d_val); 2392 2393 if ((d->d_un.d_val & ~SUPPORTED_DT_FLAGS_1) != 0) { 2394 DL_WARN("Unsupported flags DT_FLAGS_1=%p", reinterpret_cast<void*>(d->d_un.d_val)); 2395 } 2396 break; 2397#if defined(__mips__) 2398 case DT_MIPS_RLD_MAP: 2399 // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB. 2400 { 2401 r_debug** dp = reinterpret_cast<r_debug**>(load_bias + d->d_un.d_ptr); 2402 *dp = &_r_debug; 2403 } 2404 break; 2405 case DT_MIPS_RLD_MAP2: 2406 // Set the DT_MIPS_RLD_MAP2 entry to the address of _r_debug for GDB. 2407 { 2408 r_debug** dp = reinterpret_cast<r_debug**>(reinterpret_cast<ElfW(Addr)>(d) + d->d_un.d_val); 2409 *dp = &_r_debug; 2410 } 2411 break; 2412 2413 case DT_MIPS_RLD_VERSION: 2414 case DT_MIPS_FLAGS: 2415 case DT_MIPS_BASE_ADDRESS: 2416 case DT_MIPS_UNREFEXTNO: 2417 break; 2418 2419 case DT_MIPS_SYMTABNO: 2420 mips_symtabno_ = d->d_un.d_val; 2421 break; 2422 2423 case DT_MIPS_LOCAL_GOTNO: 2424 mips_local_gotno_ = d->d_un.d_val; 2425 break; 2426 2427 case DT_MIPS_GOTSYM: 2428 mips_gotsym_ = d->d_un.d_val; 2429 break; 2430#endif 2431 // Ignored: "Its use has been superseded by the DF_BIND_NOW flag" 2432 case DT_BIND_NOW: 2433 break; 2434 2435 // Ignore: bionic does not support symbol versioning... 2436 case DT_VERSYM: 2437 case DT_VERDEF: 2438 case DT_VERDEFNUM: 2439 case DT_VERNEED: 2440 case DT_VERNEEDNUM: 2441 break; 2442 2443 default: 2444 if (!relocating_linker) { 2445 DL_WARN("%s: unused DT entry: type %p arg %p", name, 2446 reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val)); 2447 } 2448 break; 2449 } 2450 } 2451 2452 DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p", 2453 reinterpret_cast<void*>(base), strtab_, symtab_); 2454 2455 // Sanity checks. 2456 if (relocating_linker && needed_count != 0) { 2457 DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries"); 2458 return false; 2459 } 2460 if (nbucket_ == 0) { 2461 DL_ERR("empty/missing DT_HASH/DT_GNU_HASH in \"%s\" (new hash type from the future?)", name); 2462 return false; 2463 } 2464 if (strtab_ == 0) { 2465 DL_ERR("empty/missing DT_STRTAB in \"%s\"", name); 2466 return false; 2467 } 2468 if (symtab_ == 0) { 2469 DL_ERR("empty/missing DT_SYMTAB in \"%s\"", name); 2470 return false; 2471 } 2472 return true; 2473} 2474 2475bool soinfo::link_image(const soinfo_list_t& global_group, const soinfo_list_t& local_group, const android_dlextinfo* extinfo) { 2476 2477 local_group_root_ = local_group.front(); 2478 if (local_group_root_ == nullptr) { 2479 local_group_root_ = this; 2480 } 2481 2482#if !defined(__LP64__) 2483 if (has_text_relocations) { 2484 // Make segments writable to allow text relocations to work properly. We will later call 2485 // phdr_table_protect_segments() after all of them are applied and all constructors are run. 2486 DL_WARN("%s has text relocations. This is wasting memory and prevents " 2487 "security hardening. 
Please fix.", name); 2488 if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) { 2489 DL_ERR("can't unprotect loadable segments for \"%s\": %s", 2490 name, strerror(errno)); 2491 return false; 2492 } 2493 } 2494#endif 2495 2496#if defined(USE_RELA) 2497 if (rela_ != nullptr) { 2498 DEBUG("[ relocating %s ]", name); 2499 if (relocate(rela_, rela_count_, global_group, local_group)) { 2500 return false; 2501 } 2502 } 2503 if (plt_rela_ != nullptr) { 2504 DEBUG("[ relocating %s plt ]", name); 2505 if (relocate(plt_rela_, plt_rela_count_, global_group, local_group)) { 2506 return false; 2507 } 2508 } 2509#else 2510 if (rel_ != nullptr) { 2511 DEBUG("[ relocating %s ]", name); 2512 if (relocate(rel_, rel_count_, global_group, local_group)) { 2513 return false; 2514 } 2515 } 2516 if (plt_rel_ != nullptr) { 2517 DEBUG("[ relocating %s plt ]", name); 2518 if (relocate(plt_rel_, plt_rel_count_, global_group, local_group)) { 2519 return false; 2520 } 2521 } 2522#endif 2523 2524#if defined(__mips__) 2525 if (!mips_relocate_got(global_group, local_group)) { 2526 return false; 2527 } 2528#endif 2529 2530 DEBUG("[ finished linking %s ]", name); 2531 2532#if !defined(__LP64__) 2533 if (has_text_relocations) { 2534 // All relocations are done, we can protect our segments back to read-only. 2535 if (phdr_table_protect_segments(phdr, phnum, load_bias) < 0) { 2536 DL_ERR("can't protect segments for \"%s\": %s", 2537 name, strerror(errno)); 2538 return false; 2539 } 2540 } 2541#endif 2542 2543 /* We can also turn on GNU RELRO protection */ 2544 if (phdr_table_protect_gnu_relro(phdr, phnum, load_bias) < 0) { 2545 DL_ERR("can't enable GNU RELRO protection for \"%s\": %s", 2546 name, strerror(errno)); 2547 return false; 2548 } 2549 2550 /* Handle serializing/sharing the RELRO segment */ 2551 if (extinfo && (extinfo->flags & ANDROID_DLEXT_WRITE_RELRO)) { 2552 if (phdr_table_serialize_gnu_relro(phdr, phnum, load_bias, 2553 extinfo->relro_fd) < 0) { 2554 DL_ERR("failed serializing GNU RELRO section for \"%s\": %s", 2555 name, strerror(errno)); 2556 return false; 2557 } 2558 } else if (extinfo && (extinfo->flags & ANDROID_DLEXT_USE_RELRO)) { 2559 if (phdr_table_map_gnu_relro(phdr, phnum, load_bias, 2560 extinfo->relro_fd) < 0) { 2561 DL_ERR("failed mapping GNU RELRO section for \"%s\": %s", 2562 name, strerror(errno)); 2563 return false; 2564 } 2565 } 2566 2567 notify_gdb_of_load(this); 2568 return true; 2569} 2570 2571/* 2572 * This function adds the vdso to the internal dso list. 2573 * It helps with stack unwinding through signal handlers. 2574 * Also, it makes bionic more like glibc. 2575 */ 2576static void add_vdso(KernelArgumentBlock& args __unused) { 2577#if defined(AT_SYSINFO_EHDR) 2578 ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR)); 2579 if (ehdr_vdso == nullptr) { 2580 return; 2581 } 2582 2583 soinfo* si = soinfo_alloc("[vdso]", nullptr, 0, 0); 2584 2585 si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff); 2586 si->phnum = ehdr_vdso->e_phnum; 2587 si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso); 2588 si->size = phdr_table_get_load_size(si->phdr, si->phnum); 2589 si->load_bias = get_elf_exec_load_bias(ehdr_vdso); 2590 2591 si->prelink_image(); 2592 si->link_image(g_empty_list, soinfo::soinfo_list_t::make_list(si), nullptr); 2593#endif 2594} 2595 2596/* 2597 * This is linker soinfo for GDB. See details below.
2598 */ 2599#if defined(__LP64__) 2600#define LINKER_PATH "/system/bin/linker64" 2601#else 2602#define LINKER_PATH "/system/bin/linker" 2603#endif 2604static soinfo linker_soinfo_for_gdb(LINKER_PATH, nullptr, 0, 0); 2605 2606/* gdb expects the linker to be in the debug shared object list. 2607 * Without this, gdb has trouble locating the linker's ".text" 2608 * and ".plt" sections. Gdb could also potentially use this to 2609 * relocate the offset of our exported 'rtld_db_dlactivity' symbol. 2610 * Don't use soinfo_alloc(), because the linker shouldn't 2611 * be on the soinfo list. 2612 */ 2613static void init_linker_info_for_gdb(ElfW(Addr) linker_base) { 2614 linker_soinfo_for_gdb.base = linker_base; 2615 2616 /* 2617 * Set the dynamic field in the link map otherwise gdb will complain with 2618 * the following: 2619 * warning: .dynamic section for "/system/bin/linker" is not at the 2620 * expected address (wrong library or version mismatch?) 2621 */ 2622 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base); 2623 ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_base + elf_hdr->e_phoff); 2624 phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base, 2625 &linker_soinfo_for_gdb.dynamic, nullptr); 2626 insert_soinfo_into_debug_map(&linker_soinfo_for_gdb); 2627} 2628 2629/* 2630 * This code is called after the linker has linked itself and 2631 * fixed its own GOT. It is safe to make references to externs 2632 * and other non-local data at this point. 2633 */ 2634static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) { 2635#if TIMING 2636 struct timeval t0, t1; 2637 gettimeofday(&t0, 0); 2638#endif 2639 2640 // Initialize environment functions, and get to the ELF aux vectors table. 2641 linker_env_init(args); 2642 2643 // If this is a setuid/setgid program, close the security hole described in 2644 // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc 2645 if (get_AT_SECURE()) { 2646 nullify_closed_stdio(); 2647 } 2648 2649 debuggerd_init(); 2650 2651 // Get a few environment variables. 2652 const char* LD_DEBUG = linker_env_get("LD_DEBUG"); 2653 if (LD_DEBUG != nullptr) { 2654 g_ld_debug_verbosity = atoi(LD_DEBUG); 2655 } 2656 2657 // Normally, these are cleaned by linker_env_init, but the test 2658 // doesn't cost us anything. 2659 const char* ldpath_env = nullptr; 2660 const char* ldpreload_env = nullptr; 2661 if (!get_AT_SECURE()) { 2662 ldpath_env = linker_env_get("LD_LIBRARY_PATH"); 2663 ldpreload_env = linker_env_get("LD_PRELOAD"); 2664 } 2665 2666 INFO("[ android linker & debugger ]"); 2667 2668 soinfo* si = soinfo_alloc(args.argv[0], nullptr, 0, RTLD_GLOBAL); 2669 if (si == nullptr) { 2670 exit(EXIT_FAILURE); 2671 } 2672 2673 /* bootstrap the link map, the main exe always needs to be first */ 2674 si->set_main_executable(); 2675 link_map* map = &(si->link_map_head); 2676 2677 map->l_addr = 0; 2678 map->l_name = args.argv[0]; 2679 map->l_prev = nullptr; 2680 map->l_next = nullptr; 2681 2682 _r_debug.r_map = map; 2683 r_debug_tail = map; 2684 2685 init_linker_info_for_gdb(linker_base); 2686 2687 // Extract information passed from the kernel. 2688 si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR)); 2689 si->phnum = args.getauxval(AT_PHNUM); 2690 si->entry = args.getauxval(AT_ENTRY); 2691 2692 /* Compute the value of si->base. We can't rely on the fact that 2693 * the first entry is the PHDR because this will not be true 2694 * for certain executables (e.g.
some in the NDK unit test suite) 2695 */ 2696 si->base = 0; 2697 si->size = phdr_table_get_load_size(si->phdr, si->phnum); 2698 si->load_bias = 0; 2699 for (size_t i = 0; i < si->phnum; ++i) { 2700 if (si->phdr[i].p_type == PT_PHDR) { 2701 si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr; 2702 si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset; 2703 break; 2704 } 2705 } 2706 si->dynamic = nullptr; 2707 2708 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(si->base); 2709 if (elf_hdr->e_type != ET_DYN) { 2710 __libc_format_fd(2, "error: only position independent executables (PIE) are supported.\n"); 2711 exit(EXIT_FAILURE); 2712 } 2713 2714 // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid). 2715 parse_LD_LIBRARY_PATH(ldpath_env); 2716 parse_LD_PRELOAD(ldpreload_env); 2717 2718 somain = si; 2719 2720 si->prelink_image(); 2721 2722 // add somain to global group 2723 si->set_dt_flags_1(si->get_dt_flags_1() | DF_1_GLOBAL); 2724 2725 // Load ld_preloads and dependencies. 2726 StringLinkedList needed_library_name_list; 2727 size_t needed_libraries_count = 0; 2728 size_t ld_preloads_count = 0; 2729 while (g_ld_preload_names[ld_preloads_count] != nullptr) { 2730 needed_library_name_list.push_back(g_ld_preload_names[ld_preloads_count++]); 2731 ++needed_libraries_count; 2732 } 2733 2734 for_each_dt_needed(si, [&](const char* name) { 2735 needed_library_name_list.push_back(name); 2736 ++needed_libraries_count; 2737 }); 2738 2739 const char* needed_library_names[needed_libraries_count]; 2740 2741 memset(needed_library_names, 0, sizeof(needed_library_names)); 2742 needed_library_name_list.copy_to_array(needed_library_names, needed_libraries_count); 2743 2744 if (needed_libraries_count > 0 && !find_libraries(si, needed_library_names, needed_libraries_count, nullptr, g_ld_preloads, ld_preloads_count, RTLD_GLOBAL, nullptr)) { 2745 __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer()); 2746 exit(EXIT_FAILURE); 2747 } else if (needed_libraries_count == 0) { 2748 if (!si->link_image(g_empty_list, soinfo::soinfo_list_t::make_list(si), nullptr)) { 2749 __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer()); 2750 exit(EXIT_FAILURE); 2751 } 2752 si->increment_ref_count(); 2753 } 2754 2755 add_vdso(args); 2756 2757 si->call_pre_init_constructors(); 2758 2759 /* After prelink_image(), si->load_bias is initialized. 2760 * For a shared library, map->l_addr is updated in notify_gdb_of_load, 2761 * but for the executable we have to update it here so that 2762 * _Unwind_Backtrace (e.g. on x86) works correctly within the executable.
2763 */ 2764 map->l_addr = si->load_bias; 2765 si->call_constructors(); 2766 2767#if TIMING 2768 gettimeofday(&t1, nullptr); 2769 PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) ( 2770 (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) - 2771 (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec))); 2772#endif 2773#if STATS 2774 PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0], 2775 linker_stats.count[kRelocAbsolute], 2776 linker_stats.count[kRelocRelative], 2777 linker_stats.count[kRelocCopy], 2778 linker_stats.count[kRelocSymbol]); 2779#endif 2780#if COUNT_PAGES 2781 { 2782 unsigned n; 2783 unsigned i; 2784 unsigned count = 0; 2785 for (n = 0; n < 4096; n++) { 2786 if (bitmask[n]) { 2787 unsigned x = bitmask[n]; 2788#if defined(__LP64__) 2789 for (i = 0; i < 32; i++) { 2790#else 2791 for (i = 0; i < 8; i++) { 2792#endif 2793 if (x & 1) { 2794 count++; 2795 } 2796 x >>= 1; 2797 } 2798 } 2799 } 2800 PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4); 2801 } 2802#endif 2803 2804#if TIMING || STATS || COUNT_PAGES 2805 fflush(stdout); 2806#endif 2807 2808 TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry)); 2809 return si->entry; 2810} 2811 2812/* Compute the load-bias of an existing executable. This shall only 2813 * be used to compute the load bias of an executable or shared library 2814 * that was loaded by the kernel itself. 2815 * 2816 * Input: 2817 * elf -> address of ELF header, assumed to be at the start of the file. 2818 * Return: 2819 * load bias, i.e. add the value of any p_vaddr in the file to get 2820 * the corresponding address in memory. 2821 */ 2822static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) { 2823 ElfW(Addr) offset = elf->e_phoff; 2824 const ElfW(Phdr)* phdr_table = reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + offset); 2825 const ElfW(Phdr)* phdr_end = phdr_table + elf->e_phnum; 2826 2827 for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_end; phdr++) { 2828 if (phdr->p_type == PT_LOAD) { 2829 return reinterpret_cast<ElfW(Addr)>(elf) + phdr->p_offset - phdr->p_vaddr; 2830 } 2831 } 2832 return 0; 2833} 2834 2835extern "C" void _start(); 2836 2837/* 2838 * This is the entry point for the linker, called from begin.S. This 2839 * method is responsible for fixing the linker's own relocations, and 2840 * then calling __linker_init_post_relocation(). 2841 * 2842 * Because this method is called before the linker has fixed its own 2843 * relocations, any attempt to reference an extern variable, extern 2844 * function, or other GOT reference will generate a segfault. 2845 */ 2846extern "C" ElfW(Addr) __linker_init(void* raw_args) { 2847 KernelArgumentBlock args(raw_args); 2848 2849 ElfW(Addr) linker_addr = args.getauxval(AT_BASE); 2850 ElfW(Addr) entry_point = args.getauxval(AT_ENTRY); 2851 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr); 2852 ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff); 2853 2854 soinfo linker_so("[dynamic linker]", nullptr, 0, 0); 2855 2856 // If the linker is not acting as PT_INTERP, entry_point is equal to 2857 // _start, which means that the linker is running as an executable that was 2858 // already linked by PT_INTERP.
2859 // 2860 // This happens when user tries to run 'adb shell /system/bin/linker' 2861 // see also https://code.google.com/p/android/issues/detail?id=63174 2862 if (reinterpret_cast<ElfW(Addr)>(&_start) == entry_point) { 2863 __libc_fatal("This is %s, the helper program for shared library executables.\n", args.argv[0]); 2864 } 2865 2866 linker_so.base = linker_addr; 2867 linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum); 2868 linker_so.load_bias = get_elf_exec_load_bias(elf_hdr); 2869 linker_so.dynamic = nullptr; 2870 linker_so.phdr = phdr; 2871 linker_so.phnum = elf_hdr->e_phnum; 2872 linker_so.set_linker_flag(); 2873 2874 // This might not be obvious... The reasons why we pass g_empty_list 2875 // in place of local_group here are (1) we do not really need it, because 2876 // linker is built with DT_SYMBOLIC and therefore relocates its symbols against 2877 // itself without having to look into local_group and (2) allocators 2878 // are not yet initialized, and therefore we cannot use linked_list.push_* 2879 // functions at this point. 2880 if (!(linker_so.prelink_image() && linker_so.link_image(g_empty_list, g_empty_list, nullptr))) { 2881 // It would be nice to print an error message, but if the linker 2882 // can't link itself, there's no guarantee that we'll be able to 2883 // call write() (because it involves a GOT reference). We may as 2884 // well try though... 2885 const char* msg = "CANNOT LINK EXECUTABLE: "; 2886 write(2, msg, strlen(msg)); 2887 write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf)); 2888 write(2, "\n", 1); 2889 _exit(EXIT_FAILURE); 2890 } 2891 2892 __libc_init_tls(args); 2893 2894 // Initialize the linker's own global variables 2895 linker_so.call_constructors(); 2896 2897 // Initialize static variables. Note that in order to 2898 // get correct libdl_info we need to call constructors 2899 // before get_libdl_info(). 2900 solist = get_libdl_info(); 2901 sonext = get_libdl_info(); 2902 2903 // We have successfully fixed our own relocations. It's safe to run 2904 // the main part of the linker now. 2905 args.abort_message_ptr = &g_abort_message; 2906 ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr); 2907 2908 protect_data(PROT_READ); 2909 2910 // Return the address that the calling assembly stub should jump to. 2911 return start_address; 2912} 2913
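// A worked example for get_elf_exec_load_bias() above, with illustrative numbers
// only: if the kernel mapped the ELF header at 0x7f0000000000 and the first
// PT_LOAD segment has p_offset 0 and p_vaddr 0, the bias is
// 0x7f0000000000 + 0 - 0 = 0x7f0000000000; adding that bias to any p_vaddr in the
// file gives the corresponding address in memory, which is how load_bias is used
// throughout the linker.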