// linker.cpp revision 012cb4583a5f8564059142bb1900ea3a31e7cfa9
1/* 2 * Copyright (C) 2008, 2009 The Android Open Source Project 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * * Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * * Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the 13 * distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29#include <dlfcn.h> 30#include <errno.h> 31#include <fcntl.h> 32#include <inttypes.h> 33#include <pthread.h> 34#include <stdio.h> 35#include <stdlib.h> 36#include <string.h> 37#include <sys/atomics.h> 38#include <sys/mman.h> 39#include <sys/stat.h> 40#include <unistd.h> 41 42// Private C library headers. 
43#include "private/bionic_tls.h" 44#include "private/KernelArgumentBlock.h" 45#include "private/ScopedPthreadMutexLocker.h" 46 47#include "linker.h" 48#include "linker_debug.h" 49#include "linker_environ.h" 50#include "linker_phdr.h" 51 52/* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<< 53 * 54 * Do NOT use malloc() and friends or pthread_*() code here. 55 * Don't use printf() either; it's caused mysterious memory 56 * corruption in the past. 57 * The linker runs before we bring up libc and it's easiest 58 * to make sure it does not depend on any complex libc features 59 * 60 * open issues / todo: 61 * 62 * - are we doing everything we should for ARM_COPY relocations? 63 * - cleaner error reporting 64 * - after linking, set as much stuff as possible to READONLY 65 * and NOEXEC 66 */ 67 68static bool soinfo_link_image(soinfo* si); 69static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf); 70 71// We can't use malloc(3) in the dynamic linker. We use a linked list of anonymous 72// maps, each a single page in size. The pages are broken up into as many struct soinfo 73// objects as will fit, and they're all threaded together on a free list. 
// We can't use malloc(3) here, so soinfo objects are carved out of
// page-sized anonymous mappings. Each pool is exactly one page and holds
// as many soinfo objects as fit after the 'next' chaining pointer.
#define SOINFO_PER_POOL ((PAGE_SIZE - sizeof(soinfo_pool_t*)) / sizeof(soinfo))
struct soinfo_pool_t {
  soinfo_pool_t* next;              // Chains pools together; never unmapped.
  soinfo info[SOINFO_PER_POOL];
};
// All pools ever mapped, and the free list of soinfo slots threaded through them.
static struct soinfo_pool_t* gSoInfoPools = NULL;
static soinfo* gSoInfoFreeList = NULL;

// Global list of loaded libraries. libdl_info is the permanent, static head;
// sonext tracks the tail so soinfo_alloc() can append in O(1).
static soinfo* solist = &libdl_info;
static soinfo* sonext = &libdl_info;
static soinfo* somain; /* main process, always the one after libdl_info */

// Built-in fallback search paths, consulted after LD_LIBRARY_PATH.
static const char* const gDefaultLdPaths[] = {
#if defined(__LP64__)
  "/vendor/lib64",
  "/system/lib64",
#else
  "/vendor/lib",
  "/system/lib",
#endif
  NULL
};

// NOTE: the *_BUFSIZE macros reference the *_MAX macros defined after them;
// this is fine because macro expansion happens at the point of use.
#define LDPATH_BUFSIZE (LDPATH_MAX*64)
#define LDPATH_MAX 8

#define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
#define LDPRELOAD_MAX 8

// Backing storage and NULL-terminated pointer array for the parsed
// LD_LIBRARY_PATH components (filled in by parse_LD_LIBRARY_PATH).
static char gLdPathsBuffer[LDPATH_BUFSIZE];
static const char* gLdPaths[LDPATH_MAX + 1];

// Same, for LD_PRELOAD library names (filled in by parse_LD_PRELOAD).
static char gLdPreloadsBuffer[LDPRELOAD_BUFSIZE];
static const char* gLdPreloadNames[LDPRELOAD_MAX + 1];

// soinfos of the successfully loaded LD_PRELOAD libraries (NULL-terminated).
static soinfo* gLdPreloads[LDPRELOAD_MAX + 1];

// Set from LD_DEBUG; controls linker trace verbosity.
__LIBC_HIDDEN__ int gLdDebugVerbosity;

__LIBC_HIDDEN__ abort_msg_t* gAbortMessage = NULL; // For debuggerd.
114 115enum RelocationKind { 116 kRelocAbsolute = 0, 117 kRelocRelative, 118 kRelocCopy, 119 kRelocSymbol, 120 kRelocMax 121}; 122 123#if STATS 124struct linker_stats_t { 125 int count[kRelocMax]; 126}; 127 128static linker_stats_t linker_stats; 129 130static void count_relocation(RelocationKind kind) { 131 ++linker_stats.count[kind]; 132} 133#else 134static void count_relocation(RelocationKind) { 135} 136#endif 137 138#if COUNT_PAGES 139static unsigned bitmask[4096]; 140#if defined(__LP64__) 141#define MARK(offset) \ 142 do { \ 143 if ((((offset) >> 12) >> 5) < 4096) \ 144 bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \ 145 } while (0) 146#else 147#define MARK(offset) \ 148 do { \ 149 bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \ 150 } while (0) 151#endif 152#else 153#define MARK(x) do {} while (0) 154#endif 155 156// You shouldn't try to call memory-allocating functions in the dynamic linker. 157// Guard against the most obvious ones. 158#define DISALLOW_ALLOCATION(return_type, name, ...) \ 159 return_type name __VA_ARGS__ \ 160 { \ 161 const char* msg = "ERROR: " #name " called from the dynamic linker!\n"; \ 162 __libc_format_log(ANDROID_LOG_FATAL, "linker", "%s", msg); \ 163 write(2, msg, strlen(msg)); \ 164 abort(); \ 165 } 166DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused)); 167DISALLOW_ALLOCATION(void, free, (void* u __unused)); 168DISALLOW_ALLOCATION(void*, realloc, (void* u1 __unused, size_t u2 __unused)); 169DISALLOW_ALLOCATION(void*, calloc, (size_t u1 __unused, size_t u2 __unused)); 170 171static char tmp_err_buf[768]; 172static char __linker_dl_err_buf[768]; 173 174char* linker_get_error_buffer() { 175 return &__linker_dl_err_buf[0]; 176} 177 178size_t linker_get_error_buffer_size() { 179 return sizeof(__linker_dl_err_buf); 180} 181 182/* 183 * This function is an empty stub where GDB locates a breakpoint to get notified 184 * about linker activity. 
185 */ 186extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity(); 187 188static r_debug _r_debug = {1, NULL, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0}; 189static link_map* r_debug_tail = 0; 190 191static pthread_mutex_t gDebugMutex = PTHREAD_MUTEX_INITIALIZER; 192 193static void insert_soinfo_into_debug_map(soinfo* info) { 194 // Copy the necessary fields into the debug structure. 195 link_map* map = &(info->link_map_head); 196 map->l_addr = info->load_bias; 197 map->l_name = reinterpret_cast<char*>(info->name); 198 map->l_ld = info->dynamic; 199 200 /* Stick the new library at the end of the list. 201 * gdb tends to care more about libc than it does 202 * about leaf libraries, and ordering it this way 203 * reduces the back-and-forth over the wire. 204 */ 205 if (r_debug_tail) { 206 r_debug_tail->l_next = map; 207 map->l_prev = r_debug_tail; 208 map->l_next = 0; 209 } else { 210 _r_debug.r_map = map; 211 map->l_prev = 0; 212 map->l_next = 0; 213 } 214 r_debug_tail = map; 215} 216 217static void remove_soinfo_from_debug_map(soinfo* info) { 218 link_map* map = &(info->link_map_head); 219 220 if (r_debug_tail == map) { 221 r_debug_tail = map->l_prev; 222 } 223 224 if (map->l_prev) { 225 map->l_prev->l_next = map->l_next; 226 } 227 if (map->l_next) { 228 map->l_next->l_prev = map->l_prev; 229 } 230} 231 232static void notify_gdb_of_load(soinfo* info) { 233 if (info->flags & FLAG_EXE) { 234 // GDB already knows about the main executable 235 return; 236 } 237 238 ScopedPthreadMutexLocker locker(&gDebugMutex); 239 240 _r_debug.r_state = r_debug::RT_ADD; 241 rtld_db_dlactivity(); 242 243 insert_soinfo_into_debug_map(info); 244 245 _r_debug.r_state = r_debug::RT_CONSISTENT; 246 rtld_db_dlactivity(); 247} 248 249static void notify_gdb_of_unload(soinfo* info) { 250 if (info->flags & FLAG_EXE) { 251 // GDB already knows about the main executable 252 return; 253 } 254 255 ScopedPthreadMutexLocker 
locker(&gDebugMutex); 256 257 _r_debug.r_state = r_debug::RT_DELETE; 258 rtld_db_dlactivity(); 259 260 remove_soinfo_from_debug_map(info); 261 262 _r_debug.r_state = r_debug::RT_CONSISTENT; 263 rtld_db_dlactivity(); 264} 265 266void notify_gdb_of_libraries() { 267 _r_debug.r_state = r_debug::RT_ADD; 268 rtld_db_dlactivity(); 269 _r_debug.r_state = r_debug::RT_CONSISTENT; 270 rtld_db_dlactivity(); 271} 272 273static bool ensure_free_list_non_empty() { 274 if (gSoInfoFreeList != NULL) { 275 return true; 276 } 277 278 // Allocate a new pool. 279 soinfo_pool_t* pool = reinterpret_cast<soinfo_pool_t*>(mmap(NULL, sizeof(*pool), 280 PROT_READ|PROT_WRITE, 281 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0)); 282 if (pool == MAP_FAILED) { 283 return false; 284 } 285 286 // Add the pool to our list of pools. 287 pool->next = gSoInfoPools; 288 gSoInfoPools = pool; 289 290 // Chain the entries in the new pool onto the free list. 291 gSoInfoFreeList = &pool->info[0]; 292 soinfo* next = NULL; 293 for (int i = SOINFO_PER_POOL - 1; i >= 0; --i) { 294 pool->info[i].next = next; 295 next = &pool->info[i]; 296 } 297 298 return true; 299} 300 301static void set_soinfo_pool_protection(int protection) { 302 for (soinfo_pool_t* p = gSoInfoPools; p != NULL; p = p->next) { 303 if (mprotect(p, sizeof(*p), protection) == -1) { 304 abort(); // Can't happen. 305 } 306 } 307} 308 309static soinfo* soinfo_alloc(const char* name) { 310 if (strlen(name) >= SOINFO_NAME_LEN) { 311 DL_ERR("library name \"%s\" too long", name); 312 return NULL; 313 } 314 315 if (!ensure_free_list_non_empty()) { 316 DL_ERR("out of memory when loading \"%s\"", name); 317 return NULL; 318 } 319 320 // Take the head element off the free list. 321 soinfo* si = gSoInfoFreeList; 322 gSoInfoFreeList = gSoInfoFreeList->next; 323 324 // Initialize the new element. 
325 memset(si, 0, sizeof(soinfo)); 326 strlcpy(si->name, name, sizeof(si->name)); 327 sonext->next = si; 328 sonext = si; 329 330 TRACE("name %s: allocated soinfo @ %p", name, si); 331 return si; 332} 333 334static void soinfo_free(soinfo* si) { 335 if (si == NULL) { 336 return; 337 } 338 339 soinfo *prev = NULL, *trav; 340 341 TRACE("name %s: freeing soinfo @ %p", si->name, si); 342 343 for (trav = solist; trav != NULL; trav = trav->next) { 344 if (trav == si) 345 break; 346 prev = trav; 347 } 348 if (trav == NULL) { 349 /* si was not in solist */ 350 DL_ERR("name \"%s\" is not in solist!", si->name); 351 return; 352 } 353 354 /* prev will never be NULL, because the first entry in solist is 355 always the static libdl_info. 356 */ 357 prev->next = si->next; 358 if (si == sonext) { 359 sonext = prev; 360 } 361 si->next = gSoInfoFreeList; 362 gSoInfoFreeList = si; 363} 364 365 366static void parse_path(const char* path, const char* delimiters, 367 const char** array, char* buf, size_t buf_size, size_t max_count) { 368 if (path == NULL) { 369 return; 370 } 371 372 size_t len = strlcpy(buf, path, buf_size); 373 374 size_t i = 0; 375 char* buf_p = buf; 376 while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) { 377 if (*array[i] != '\0') { 378 ++i; 379 } 380 } 381 382 // Forget the last path if we had to truncate; this occurs if the 2nd to 383 // last char isn't '\0' (i.e. wasn't originally a delimiter). 384 if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') { 385 array[i - 1] = NULL; 386 } else { 387 array[i] = NULL; 388 } 389} 390 391static void parse_LD_LIBRARY_PATH(const char* path) { 392 parse_path(path, ":", gLdPaths, 393 gLdPathsBuffer, sizeof(gLdPathsBuffer), LDPATH_MAX); 394} 395 396static void parse_LD_PRELOAD(const char* path) { 397 // We have historically supported ':' as well as ' ' in LD_PRELOAD. 
398 parse_path(path, " :", gLdPreloadNames, 399 gLdPreloadsBuffer, sizeof(gLdPreloadsBuffer), LDPRELOAD_MAX); 400} 401 402#if defined(__arm__) 403 404/* For a given PC, find the .so that it belongs to. 405 * Returns the base address of the .ARM.exidx section 406 * for that .so, and the number of 8-byte entries 407 * in that section (via *pcount). 408 * 409 * Intended to be called by libc's __gnu_Unwind_Find_exidx(). 410 * 411 * This function is exposed via dlfcn.cpp and libdl.so. 412 */ 413_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) { 414 unsigned addr = (unsigned)pc; 415 416 for (soinfo* si = solist; si != 0; si = si->next) { 417 if ((addr >= si->base) && (addr < (si->base + si->size))) { 418 *pcount = si->ARM_exidx_count; 419 return (_Unwind_Ptr)si->ARM_exidx; 420 } 421 } 422 *pcount = 0; 423 return NULL; 424} 425 426#endif 427 428/* Here, we only have to provide a callback to iterate across all the 429 * loaded libraries. gcc_eh does the rest. */ 430int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) { 431 int rv = 0; 432 for (soinfo* si = solist; si != NULL; si = si->next) { 433 dl_phdr_info dl_info; 434 dl_info.dlpi_addr = si->link_map_head.l_addr; 435 dl_info.dlpi_name = si->link_map_head.l_name; 436 dl_info.dlpi_phdr = si->phdr; 437 dl_info.dlpi_phnum = si->phnum; 438 rv = cb(&dl_info, sizeof(dl_phdr_info), data); 439 if (rv != 0) { 440 break; 441 } 442 } 443 return rv; 444} 445 446static ElfW(Sym)* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) { 447 ElfW(Sym)* symtab = si->symtab; 448 const char* strtab = si->strtab; 449 450 TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd", 451 name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket); 452 453 for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) { 454 ElfW(Sym)* s = symtab + n; 455 if (strcmp(strtab + s->st_name, name)) continue; 456 457 /* only concern ourselves with global and weak symbol 
definitions */ 458 switch (ELF_ST_BIND(s->st_info)) { 459 case STB_GLOBAL: 460 case STB_WEAK: 461 if (s->st_shndx == SHN_UNDEF) { 462 continue; 463 } 464 465 TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd", 466 name, si->name, reinterpret_cast<void*>(s->st_value), 467 static_cast<size_t>(s->st_size)); 468 return s; 469 } 470 } 471 472 return NULL; 473} 474 475static unsigned elfhash(const char* _name) { 476 const unsigned char* name = reinterpret_cast<const unsigned char*>(_name); 477 unsigned h = 0, g; 478 479 while (*name) { 480 h = (h << 4) + *name++; 481 g = h & 0xf0000000; 482 h ^= g; 483 h ^= g >> 24; 484 } 485 return h; 486} 487 488static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) { 489 unsigned elf_hash = elfhash(name); 490 ElfW(Sym)* s = NULL; 491 492 if (si != NULL && somain != NULL) { 493 /* 494 * Local scope is executable scope. Just start looking into it right away 495 * for the shortcut. 496 */ 497 498 if (si == somain) { 499 s = soinfo_elf_lookup(si, elf_hash, name); 500 if (s != NULL) { 501 *lsi = si; 502 goto done; 503 } 504 } else { 505 /* Order of symbol lookup is controlled by DT_SYMBOLIC flag */ 506 507 /* 508 * If this object was built with symbolic relocations disabled, the 509 * first place to look to resolve external references is the main 510 * executable. 511 */ 512 513 if (!si->has_DT_SYMBOLIC) { 514 DEBUG("%s: looking up %s in executable %s", 515 si->name, name, somain->name); 516 s = soinfo_elf_lookup(somain, elf_hash, name); 517 if (s != NULL) { 518 *lsi = somain; 519 goto done; 520 } 521 } 522 523 /* Look for symbols in the local scope (the object who is 524 * searching). This happens with C++ templates on x86 for some 525 * reason. 526 * 527 * Notes on weak symbols: 528 * The ELF specs are ambiguous about treatment of weak definitions in 529 * dynamic linking. Some systems return the first definition found 530 * and some the first non-weak definition. This is system dependent. 
531 * Here we return the first definition found for simplicity. */ 532 533 s = soinfo_elf_lookup(si, elf_hash, name); 534 if (s != NULL) { 535 *lsi = si; 536 goto done; 537 } 538 539 /* 540 * If this object was built with -Bsymbolic and symbol is not found 541 * in the local scope, try to find the symbol in the main executable. 542 */ 543 544 if (si->has_DT_SYMBOLIC) { 545 DEBUG("%s: looking up %s in executable %s after local scope", 546 si->name, name, somain->name); 547 s = soinfo_elf_lookup(somain, elf_hash, name); 548 if (s != NULL) { 549 *lsi = somain; 550 goto done; 551 } 552 } 553 } 554 } 555 556 /* Next, look for it in the preloads list */ 557 for (int i = 0; gLdPreloads[i] != NULL; i++) { 558 s = soinfo_elf_lookup(gLdPreloads[i], elf_hash, name); 559 if (s != NULL) { 560 *lsi = gLdPreloads[i]; 561 goto done; 562 } 563 } 564 565 for (int i = 0; needed[i] != NULL; i++) { 566 DEBUG("%s: looking up %s in %s", 567 si->name, name, needed[i]->name); 568 s = soinfo_elf_lookup(needed[i], elf_hash, name); 569 if (s != NULL) { 570 *lsi = needed[i]; 571 goto done; 572 } 573 } 574 575done: 576 if (s != NULL) { 577 TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, " 578 "found in %s, base = %p, load bias = %p", 579 si->name, name, reinterpret_cast<void*>(s->st_value), 580 (*lsi)->name, reinterpret_cast<void*>((*lsi)->base), 581 reinterpret_cast<void*>((*lsi)->load_bias)); 582 return s; 583 } 584 585 return NULL; 586} 587 588/* This is used by dlsym(3). It performs symbol lookup only within the 589 specified soinfo object and not in any of its dependencies. 590 591 TODO: Only looking in the specified soinfo seems wrong. dlsym(3) says 592 that it should do a breadth first search through the dependency 593 tree. This agrees with the ELF spec (aka System V Application 594 Binary Interface) where in Chapter 5 it discuss resolving "Shared 595 Object Dependencies" in breadth first search order. 
596 */ 597ElfW(Sym)* dlsym_handle_lookup(soinfo* si, const char* name) { 598 return soinfo_elf_lookup(si, elfhash(name), name); 599} 600 601/* This is used by dlsym(3) to performs a global symbol lookup. If the 602 start value is null (for RTLD_DEFAULT), the search starts at the 603 beginning of the global solist. Otherwise the search starts at the 604 specified soinfo (for RTLD_NEXT). 605 */ 606ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) { 607 unsigned elf_hash = elfhash(name); 608 609 if (start == NULL) { 610 start = solist; 611 } 612 613 ElfW(Sym)* s = NULL; 614 for (soinfo* si = start; (s == NULL) && (si != NULL); si = si->next) { 615 s = soinfo_elf_lookup(si, elf_hash, name); 616 if (s != NULL) { 617 *found = si; 618 break; 619 } 620 } 621 622 if (s != NULL) { 623 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p", 624 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base)); 625 } 626 627 return s; 628} 629 630soinfo* find_containing_library(const void* p) { 631 ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p); 632 for (soinfo* si = solist; si != NULL; si = si->next) { 633 if (address >= si->base && address - si->base < si->size) { 634 return si; 635 } 636 } 637 return NULL; 638} 639 640ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr) { 641 ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - si->base; 642 643 // Search the library's symbol table for any defined symbol which 644 // contains this address. 
645 for (size_t i = 0; i < si->nchain; ++i) { 646 ElfW(Sym)* sym = &si->symtab[i]; 647 if (sym->st_shndx != SHN_UNDEF && 648 soaddr >= sym->st_value && 649 soaddr < sym->st_value + sym->st_size) { 650 return sym; 651 } 652 } 653 654 return NULL; 655} 656 657static int open_library_on_path(const char* name, const char* const paths[]) { 658 char buf[512]; 659 for (size_t i = 0; paths[i] != NULL; ++i) { 660 int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name); 661 if (n < 0 || n >= static_cast<int>(sizeof(buf))) { 662 PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name); 663 continue; 664 } 665 int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC)); 666 if (fd != -1) { 667 return fd; 668 } 669 } 670 return -1; 671} 672 673static int open_library(const char* name) { 674 TRACE("[ opening %s ]", name); 675 676 // If the name contains a slash, we should attempt to open it directly and not search the paths. 677 if (strchr(name, '/') != NULL) { 678 int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC)); 679 if (fd != -1) { 680 return fd; 681 } 682 // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now. 683 } 684 685 // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths. 686 int fd = open_library_on_path(name, gLdPaths); 687 if (fd == -1) { 688 fd = open_library_on_path(name, gDefaultLdPaths); 689 } 690 return fd; 691} 692 693static soinfo* load_library(const char* name) { 694 // Open the file. 695 int fd = open_library(name); 696 if (fd == -1) { 697 DL_ERR("library \"%s\" not found", name); 698 return NULL; 699 } 700 701 // Read the ELF header and load the segments. 702 ElfReader elf_reader(name, fd); 703 if (!elf_reader.Load()) { 704 return NULL; 705 } 706 707 const char* bname = strrchr(name, '/'); 708 soinfo* si = soinfo_alloc(bname ? 
bname + 1 : name); 709 if (si == NULL) { 710 return NULL; 711 } 712 si->base = elf_reader.load_start(); 713 si->size = elf_reader.load_size(); 714 si->load_bias = elf_reader.load_bias(); 715 si->flags = 0; 716 si->entry = 0; 717 si->dynamic = NULL; 718 si->phnum = elf_reader.phdr_count(); 719 si->phdr = elf_reader.loaded_phdr(); 720 return si; 721} 722 723static soinfo *find_loaded_library(const char* name) { 724 // TODO: don't use basename only for determining libraries 725 // http://code.google.com/p/android/issues/detail?id=6670 726 727 const char* bname = strrchr(name, '/'); 728 bname = bname ? bname + 1 : name; 729 730 for (soinfo* si = solist; si != NULL; si = si->next) { 731 if (!strcmp(bname, si->name)) { 732 return si; 733 } 734 } 735 return NULL; 736} 737 738static soinfo* find_library_internal(const char* name) { 739 if (name == NULL) { 740 return somain; 741 } 742 743 soinfo* si = find_loaded_library(name); 744 if (si != NULL) { 745 if (si->flags & FLAG_LINKED) { 746 return si; 747 } 748 DL_ERR("OOPS: recursive link to \"%s\"", si->name); 749 return NULL; 750 } 751 752 TRACE("[ '%s' has not been loaded yet. Locating...]", name); 753 si = load_library(name); 754 if (si == NULL) { 755 return NULL; 756 } 757 758 // At this point we know that whatever is loaded @ base is a valid ELF 759 // shared library whose segments are properly mapped in. 
760 TRACE("[ find_library_internal base=%p size=%zu name='%s' ]", 761 reinterpret_cast<void*>(si->base), si->size, si->name); 762 763 if (!soinfo_link_image(si)) { 764 munmap(reinterpret_cast<void*>(si->base), si->size); 765 soinfo_free(si); 766 return NULL; 767 } 768 769 return si; 770} 771 772static soinfo* find_library(const char* name) { 773 soinfo* si = find_library_internal(name); 774 if (si != NULL) { 775 si->ref_count++; 776 } 777 return si; 778} 779 780static int soinfo_unload(soinfo* si) { 781 if (si->ref_count == 1) { 782 TRACE("unloading '%s'", si->name); 783 si->CallDestructors(); 784 785 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) { 786 if (d->d_tag == DT_NEEDED) { 787 const char* library_name = si->strtab + d->d_un.d_val; 788 TRACE("%s needs to unload %s", si->name, library_name); 789 soinfo_unload(find_loaded_library(library_name)); 790 } 791 } 792 793 munmap(reinterpret_cast<void*>(si->base), si->size); 794 notify_gdb_of_unload(si); 795 soinfo_free(si); 796 si->ref_count = 0; 797 } else { 798 si->ref_count--; 799 TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count); 800 } 801 return 0; 802} 803 804void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) { 805 snprintf(buffer, buffer_size, "%s:%s", gDefaultLdPaths[0], gDefaultLdPaths[1]); 806} 807 808void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) { 809 if (!get_AT_SECURE()) { 810 parse_LD_LIBRARY_PATH(ld_library_path); 811 } 812} 813 814soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo) { 815 if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL)) != 0) { 816 DL_ERR("invalid flags to dlopen: %x", flags); 817 return NULL; 818 } 819 if (extinfo != NULL && ((extinfo->flags & ~(ANDROID_DLEXT_VALID_FLAG_BITS)) != 0)) { 820 DL_ERR("invalid extended flags to android_dlopen_ext: %x", extinfo->flags); 821 return NULL; 822 } 823 set_soinfo_pool_protection(PROT_READ | PROT_WRITE); 824 soinfo* si = 
find_library(name); 825 if (si != NULL) { 826 si->CallConstructors(); 827 } 828 set_soinfo_pool_protection(PROT_READ); 829 return si; 830} 831 832int do_dlclose(soinfo* si) { 833 set_soinfo_pool_protection(PROT_READ | PROT_WRITE); 834 int result = soinfo_unload(si); 835 set_soinfo_pool_protection(PROT_READ); 836 return result; 837} 838 839#if defined(USE_RELA) 840static int soinfo_relocate(soinfo* si, ElfW(Rela)* rela, unsigned count, soinfo* needed[]) { 841 ElfW(Sym)* s; 842 soinfo* lsi; 843 844 for (size_t idx = 0; idx < count; ++idx, ++rela) { 845 unsigned type = ELFW(R_TYPE)(rela->r_info); 846 unsigned sym = ELFW(R_SYM)(rela->r_info); 847 ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + si->load_bias); 848 ElfW(Addr) sym_addr = 0; 849 const char* sym_name = NULL; 850 851 DEBUG("Processing '%s' relocation at index %zd", si->name, idx); 852 if (type == 0) { // R_*_NONE 853 continue; 854 } 855 if (sym != 0) { 856 sym_name = reinterpret_cast<const char*>(si->strtab + si->symtab[sym].st_name); 857 s = soinfo_do_lookup(si, sym_name, &lsi, needed); 858 if (s == NULL) { 859 // We only allow an undefined symbol if this is a weak reference... 860 s = &si->symtab[sym]; 861 if (ELF_ST_BIND(s->st_info) != STB_WEAK) { 862 DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name); 863 return -1; 864 } 865 866 /* IHI0044C AAELF 4.5.1.1: 867 868 Libraries are not searched to resolve weak references. 869 It is not an error for a weak reference to remain unsatisfied. 870 871 During linking, the value of an undefined weak reference is: 872 - Zero if the relocation type is absolute 873 - The address of the place if the relocation is pc-relative 874 - The address of nominal base address if the relocation 875 type is base-relative. 
876 */ 877 878 switch (type) { 879#if defined(__aarch64__) 880 case R_AARCH64_JUMP_SLOT: 881 case R_AARCH64_GLOB_DAT: 882 case R_AARCH64_ABS64: 883 case R_AARCH64_ABS32: 884 case R_AARCH64_ABS16: 885 case R_AARCH64_RELATIVE: 886 /* 887 * The sym_addr was initialized to be zero above, or the relocation 888 * code below does not care about value of sym_addr. 889 * No need to do anything. 890 */ 891 break; 892#elif defined(__x86_64__) 893 case R_X86_64_JUMP_SLOT: 894 case R_X86_64_GLOB_DAT: 895 case R_X86_64_32: 896 case R_X86_64_RELATIVE: 897 // No need to do anything. 898 break; 899 case R_X86_64_PC32: 900 sym_addr = reloc; 901 break; 902#endif 903 default: 904 DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx); 905 return -1; 906 } 907 } else { 908 // We got a definition. 909 sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias); 910 } 911 count_relocation(kRelocSymbol); 912 } else { 913 s = NULL; 914 } 915 916 switch (type) { 917#if defined(__aarch64__) 918 case R_AARCH64_JUMP_SLOT: 919 count_relocation(kRelocAbsolute); 920 MARK(rela->r_offset); 921 TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n", 922 reloc, (sym_addr + rela->r_addend), sym_name); 923 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend); 924 break; 925 case R_AARCH64_GLOB_DAT: 926 count_relocation(kRelocAbsolute); 927 MARK(rela->r_offset); 928 TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n", 929 reloc, (sym_addr + rela->r_addend), sym_name); 930 *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend); 931 break; 932 case R_AARCH64_ABS64: 933 count_relocation(kRelocAbsolute); 934 MARK(rela->r_offset); 935 TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n", 936 reloc, (sym_addr + rela->r_addend), sym_name); 937 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend); 938 break; 939 case R_AARCH64_ABS32: 940 count_relocation(kRelocAbsolute); 941 MARK(rela->r_offset); 942 TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx 
%s\n", 943 reloc, (sym_addr + rela->r_addend), sym_name); 944 if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) && 945 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) { 946 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend); 947 } else { 948 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx", 949 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)), 950 static_cast<ElfW(Addr)>(INT32_MIN), 951 static_cast<ElfW(Addr)>(UINT32_MAX)); 952 return -1; 953 } 954 break; 955 case R_AARCH64_ABS16: 956 count_relocation(kRelocAbsolute); 957 MARK(rela->r_offset); 958 TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n", 959 reloc, (sym_addr + rela->r_addend), sym_name); 960 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) && 961 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) { 962 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend); 963 } else { 964 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx", 965 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)), 966 static_cast<ElfW(Addr)>(INT16_MIN), 967 static_cast<ElfW(Addr)>(UINT16_MAX)); 968 return -1; 969 } 970 break; 971 case R_AARCH64_PREL64: 972 count_relocation(kRelocRelative); 973 MARK(rela->r_offset); 974 TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n", 975 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name); 976 *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset; 977 break; 978 case R_AARCH64_PREL32: 979 count_relocation(kRelocRelative); 980 MARK(rela->r_offset); 981 TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n", 982 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name); 983 if ((static_cast<ElfW(Addr)>(INT32_MIN) 
<= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) && 984 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) { 985 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset); 986 } else { 987 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx", 988 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)), 989 static_cast<ElfW(Addr)>(INT32_MIN), 990 static_cast<ElfW(Addr)>(UINT32_MAX)); 991 return -1; 992 } 993 break; 994 case R_AARCH64_PREL16: 995 count_relocation(kRelocRelative); 996 MARK(rela->r_offset); 997 TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n", 998 reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name); 999 if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) && 1000 ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) { 1001 *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset); 1002 } else { 1003 DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx", 1004 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)), 1005 static_cast<ElfW(Addr)>(INT16_MIN), 1006 static_cast<ElfW(Addr)>(UINT16_MAX)); 1007 return -1; 1008 } 1009 break; 1010 1011 case R_AARCH64_RELATIVE: 1012 count_relocation(kRelocRelative); 1013 MARK(rela->r_offset); 1014 if (sym) { 1015 DL_ERR("odd RELATIVE form..."); 1016 return -1; 1017 } 1018 TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n", 1019 reloc, (si->base + rela->r_addend)); 1020 *reinterpret_cast<ElfW(Addr)*>(reloc) = (si->base + rela->r_addend); 1021 break; 1022 1023 case R_AARCH64_COPY: 1024 if ((si->flags & FLAG_EXE) == 0) { 1025 /* 1026 * 
http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf 1027 * 1028 * Section 4.7.1.10 "Dynamic relocations" 1029 * R_AARCH64_COPY may only appear in executable objects where e_type is 1030 * set to ET_EXEC. 1031 * 1032 * FLAG_EXE is set for both ET_DYN and ET_EXEC executables. 1033 * We should explicitly disallow ET_DYN executables from having 1034 * R_AARCH64_COPY relocations. 1035 */ 1036 DL_ERR("%s R_AARCH64_COPY relocations only supported for ET_EXEC", si->name); 1037 return -1; 1038 } 1039 count_relocation(kRelocCopy); 1040 MARK(rela->r_offset); 1041 TRACE_TYPE(RELO, "RELO COPY %16llx <- %lld @ %16llx %s\n", 1042 reloc, 1043 s->st_size, 1044 (sym_addr + rela->r_addend), 1045 sym_name); 1046 if (reloc == (sym_addr + rela->r_addend)) { 1047 ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed); 1048 1049 if (src == NULL) { 1050 DL_ERR("%s R_AARCH64_COPY relocation source cannot be resolved", si->name); 1051 return -1; 1052 } 1053 if (lsi->has_DT_SYMBOLIC) { 1054 DL_ERR("%s invalid R_AARCH64_COPY relocation against DT_SYMBOLIC shared " 1055 "library %s (built with -Bsymbolic?)", si->name, lsi->name); 1056 return -1; 1057 } 1058 if (s->st_size < src->st_size) { 1059 DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%lld < %lld)", 1060 si->name, s->st_size, src->st_size); 1061 return -1; 1062 } 1063 memcpy(reinterpret_cast<void*>(reloc), 1064 reinterpret_cast<void*>(src->st_value + lsi->load_bias), src->st_size); 1065 } else { 1066 DL_ERR("%s R_AARCH64_COPY relocation target cannot be resolved", si->name); 1067 return -1; 1068 } 1069 break; 1070 case R_AARCH64_TLS_TPREL64: 1071 TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n", 1072 reloc, (sym_addr + rela->r_addend), rela->r_offset); 1073 break; 1074 case R_AARCH64_TLS_DTPREL32: 1075 TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n", 1076 reloc, (sym_addr + rela->r_addend), rela->r_offset); 1077 break; 1078#elif defined(__x86_64__) 1079 case 
R_X86_64_JUMP_SLOT: 1080 count_relocation(kRelocAbsolute); 1081 MARK(rela->r_offset); 1082 TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc), 1083 static_cast<size_t>(sym_addr + rela->r_addend), sym_name); 1084 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend; 1085 break; 1086 case R_X86_64_GLOB_DAT: 1087 count_relocation(kRelocAbsolute); 1088 MARK(rela->r_offset); 1089 TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc), 1090 static_cast<size_t>(sym_addr + rela->r_addend), sym_name); 1091 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend; 1092 break; 1093 case R_X86_64_RELATIVE: 1094 count_relocation(kRelocRelative); 1095 MARK(rela->r_offset); 1096 if (sym) { 1097 DL_ERR("odd RELATIVE form..."); 1098 return -1; 1099 } 1100 TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc), 1101 static_cast<size_t>(si->base)); 1102 *reinterpret_cast<ElfW(Addr)*>(reloc) = si->base + rela->r_addend; 1103 break; 1104 case R_X86_64_32: 1105 count_relocation(kRelocRelative); 1106 MARK(rela->r_offset); 1107 TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc), 1108 static_cast<size_t>(sym_addr), sym_name); 1109 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend; 1110 break; 1111 case R_X86_64_64: 1112 count_relocation(kRelocRelative); 1113 MARK(rela->r_offset); 1114 TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc), 1115 static_cast<size_t>(sym_addr), sym_name); 1116 *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend; 1117 break; 1118 case R_X86_64_PC32: 1119 count_relocation(kRelocRelative); 1120 MARK(rela->r_offset); 1121 TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s", 1122 static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc), 1123 static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name); 1124 *reinterpret_cast<ElfW(Addr)*>(reloc) 
= sym_addr + rela->r_addend - reloc;
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx);
        return -1;
    }
  }
  return 0;
}

#else // REL, not RELA.

// Applies the REL-format (implicit addend) relocations rel[0..count) to 'si'.
// Symbols are resolved against 'si' itself and the NULL-terminated 'needed'
// soinfo list. Returns 0 on success, -1 on error (DL_ERR has been set).
static int soinfo_relocate(soinfo* si, ElfW(Rel)* rel, unsigned count, soinfo* needed[]) {
  ElfW(Sym)* s;
  soinfo* lsi;

  for (size_t idx = 0; idx < count; ++idx, ++rel) {
    unsigned type = ELFW(R_TYPE)(rel->r_info);
    // TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
    unsigned sym = ELFW(R_SYM)(rel->r_info);
    // The place being relocated, in this process's address space.
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + si->load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = NULL;

    DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }
    if (sym != 0) {
      sym_name = reinterpret_cast<const char*>(si->strtab + si->symtab[sym].st_name);
      s = soinfo_do_lookup(si, sym_name, &lsi, needed);
      if (s == NULL) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &si->symtab[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain
           unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
         */

        switch (type) {
#if defined(__arm__)
          case R_ARM_JUMP_SLOT:
          case R_ARM_GLOB_DAT:
          case R_ARM_ABS32:
          case R_ARM_RELATIVE:    /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
#elif defined(__i386__)
          case R_386_JMP_SLOT:
          case R_386_GLOB_DAT:
          case R_386_32:
          case R_386_RELATIVE:    /* Don't care. */
            // sym_addr was initialized to be zero above or relocation
            // code below does not care about value of sym_addr.
            // No need to do anything.
            break;
          case R_386_PC32:
            // PC-relative: an undefined weak resolves to the place itself,
            // making the "sym_addr - reloc" applied below equal to zero.
            sym_addr = reloc;
            break;
#endif

#if defined(__arm__)
          case R_ARM_COPY:
            // Fall through. Can't really copy if weak symbol is not found at run-time.
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
      }
      count_relocation(kRelocSymbol);
    } else {
      s = NULL;
    }

    switch (type) {
#if defined(__arm__)
      case R_ARM_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_ARM_ABS32:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
        // REL format: the implicit addend already stored at 'reloc' is kept.
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_ARM_REL32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
                   reloc, sym_addr, rel->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
        break;
      case R_ARM_COPY:
        if ((si->flags & FLAG_EXE) == 0) {
          /*
           * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
           *
           * Section 4.7.1.10 "Dynamic relocations"
           * R_ARM_COPY may only appear in executable objects where e_type is
           * set to ET_EXEC.
           *
           * TODO: FLAG_EXE is set for both ET_DYN and ET_EXEC executables.
           * We should explicitly disallow ET_DYN executables from having
           * R_ARM_COPY relocations.
           */
          DL_ERR("%s R_ARM_COPY relocations only supported for ET_EXEC", si->name);
          return -1;
        }
        count_relocation(kRelocCopy);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO %08x <- %d @ %08x %s", reloc, s->st_size, sym_addr, sym_name);
        if (reloc == sym_addr) {
          // Look up the copy source: the definition in some other library
          // (NULL start skips 'si' itself).
          ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);

          if (src == NULL) {
            DL_ERR("%s R_ARM_COPY relocation source cannot be resolved", si->name);
            return -1;
          }
          if (lsi->has_DT_SYMBOLIC) {
            DL_ERR("%s invalid R_ARM_COPY relocation against DT_SYMBOLIC shared "
                   "library %s (built with -Bsymbolic?)", si->name, lsi->name);
            return -1;
          }
          if (s->st_size < src->st_size) {
            DL_ERR("%s R_ARM_COPY relocation size mismatch (%d < %d)",
                   si->name, s->st_size, src->st_size);
            return -1;
          }
          memcpy(reinterpret_cast<void*>(reloc),
                 reinterpret_cast<void*>(src->st_value + lsi->load_bias), src->st_size);
        } else {
          DL_ERR("%s R_ARM_COPY relocation target cannot be resolved", si->name);
          return -1;
        }
        break;
#elif defined(__i386__)
      case R_386_JMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
        break;
      case R_386_32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        break;
      case R_386_PC32:
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
                   reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
        break;
#elif defined(__mips__)
      case R_MIPS_REL32:
#if defined(__LP64__)
        // MIPS Elf64_Rel entries contain compound relocations
        // We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case
        if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 ||
            ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) {
          DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)",
                 type, (unsigned)ELF64_R_TYPE2(rel->r_info),
                 (unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx);
          return -1;
        }
#endif
        count_relocation(kRelocAbsolute);
        MARK(rel->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*");
        if (s) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
        } else {
          // Section-relative entry (no symbol): rebase by the load address.
          *reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
        }
        break;
#endif

#if defined(__arm__)
      case R_ARM_RELATIVE:
#elif defined(__i386__)
      case R_386_RELATIVE:
#endif
        count_relocation(kRelocRelative);
        MARK(rel->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
                   reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(si->base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
        break;

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx);
        return -1;
    }
  }
  return 0;
}
#endif

#if defined(__mips__)
// Relocates the MIPS GOT: local entries get the load bias added; global
// entries (from mips_gotsym onwards) are resolved by symbol lookup.
// Returns false if a non-weak symbol cannot be located.
static bool mips_relocate_got(soinfo* si, soinfo* needed[]) {
  ElfW(Addr)** got = si->plt_got;
  if (got == NULL) {
    return true;
  }
  unsigned local_gotno = si->mips_local_gotno;
  unsigned gotsym = si->mips_gotsym;
  unsigned symtabno = si->mips_symtabno;
  ElfW(Sym)* symtab = si->symtab;

  // got[0] is the address of the lazy resolver function.
  // got[1] may be used for a GNU extension.
  // Set it to a recognizable address in case someone calls it (should be _rtld_bind_start).
  // FIXME: maybe this should be in a separate routine?
  if ((si->flags & FLAG_LINKER) == 0) {
    size_t g = 0;
    got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef);
    // got[1] is reserved for the GNU extension iff its most significant
    // bit is set (g is 1 here after the increment above).
    if (reinterpret_cast<intptr_t>(got[g]) < 0) {
      got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed);
    }
    // Relocate the local GOT entries.
    for (; g < local_gotno; g++) {
      got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + si->load_bias);
    }
  }

  // Now for the global GOT entries...
  ElfW(Sym)* sym = symtab + gotsym;
  got = si->plt_got + local_gotno;
  for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
    // This is an undefined reference... try to locate it.
    const char* sym_name = si->strtab + sym->st_name;
    soinfo* lsi;
    ElfW(Sym)* s = soinfo_do_lookup(si, sym_name, &lsi, needed);
    if (s == NULL) {
      // We only allow an undefined symbol if this is a weak reference.
      s = &symtab[g];
      if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
        DL_ERR("cannot locate \"%s\"...", sym_name);
        return false;
      }
      // Undefined weak references resolve to zero.
      *got = 0;
    } else {
      // FIXME: is this sufficient?
      // For reference see NetBSD link loader
      // http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
      *got = reinterpret_cast<ElfW(Addr)*>(lsi->load_bias + s->st_value);
    }
  }
  return true;
}
#endif

// Calls 'count' entries of an init/fini function array for this soinfo,
// in reverse order when 'reverse' is true (DT_FINI_ARRAY semantics).
// NOTE(review): 'count' (size_t) is narrowed to int for the loop bounds;
// harmless for realistic array sizes, but worth confirming.
void soinfo::CallArray(const char* array_name __unused, linker_function_t* functions, size_t count, bool reverse) {
  if (functions == NULL) {
    return;
  }

  TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);

  int begin = reverse ? (count - 1) : 0;
  int end = reverse ? -1 : count;
  int step = reverse ? -1 : 1;

  for (int i = begin; i != end; i += step) {
    TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
    CallFunction("function", functions[i]);
  }

  TRACE("[ Done calling %s for '%s' ]", array_name, name);
}

// Invokes a single constructor/destructor pointer. NULL and -1 entries are
// skipped (toolchains emit these as placeholder values).
void soinfo::CallFunction(const char* function_name __unused, linker_function_t function) {
  if (function == NULL || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
    return;
  }

  TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
  function();
  TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);

  // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
  // are still writable. This happens with our debug malloc (see http://b/7941716).
  set_soinfo_pool_protection(PROT_READ | PROT_WRITE);
}

void soinfo::CallPreInitConstructors() {
  // DT_PREINIT_ARRAY functions are called before any other constructors for executables,
  // but ignored in a shared library.
  CallArray("DT_PREINIT_ARRAY", preinit_array, preinit_array_count, false);
}

// Runs constructors for this soinfo: first its DT_NEEDED dependencies
// (recursively), then its own DT_INIT and DT_INIT_ARRAY. Idempotent via
// the 'constructors_called' flag.
void soinfo::CallConstructors() {
  if (constructors_called) {
    return;
  }

  // We set constructors_called before actually calling the constructors, otherwise it doesn't
  // protect against recursive constructor calls. One simple example of constructor recursion
  // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
  // 1. The program depends on libc, so libc's constructor is called here.
  // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
  // 3. dlopen() calls the constructors on the newly created
  //    soinfo for libc_malloc_debug_leak.so.
  // 4. The debug .so depends on libc, so CallConstructors is
  //    called again with the libc soinfo. If it doesn't trigger the early-
  //    out above, the libc constructor will be called again (recursively!).
  constructors_called = true;

  if ((flags & FLAG_EXE) == 0 && preinit_array != NULL) {
    // The GNU dynamic linker silently ignores these, but we warn the developer.
    PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
          name, preinit_array_count);
  }

  if (dynamic != NULL) {
    for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
      if (d->d_tag == DT_NEEDED) {
        const char* library_name = strtab + d->d_un.d_val;
        TRACE("\"%s\": calling constructors in DT_NEEDED \"%s\"", name, library_name);
        // NOTE(review): this assumes every DT_NEEDED library is already
        // loaded (linking did so), hence find_loaded_library() is presumed
        // non-NULL here -- confirm; a NULL return would crash.
        find_loaded_library(library_name)->CallConstructors();
      }
    }
  }

  TRACE("\"%s\": calling constructors", name);

  // DT_INIT should be called before DT_INIT_ARRAY if both are present.
  CallFunction("DT_INIT", init_func);
  CallArray("DT_INIT_ARRAY", init_array, init_array_count, false);
}

void soinfo::CallDestructors() {
  TRACE("\"%s\": calling destructors", name);

  // DT_FINI_ARRAY must be parsed in reverse order.
  CallArray("DT_FINI_ARRAY", fini_array, fini_array_count, true);

  // DT_FINI should be called after DT_FINI_ARRAY if both are present.
  CallFunction("DT_FINI", fini_func);
}

/* Force any of the closed stdin, stdout and stderr to be associated with
   /dev/null. */
static int nullify_closed_stdio() {
  int dev_null, i, status;
  int return_value = 0;

  dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
  if (dev_null < 0) {
    DL_ERR("cannot open /dev/null: %s", strerror(errno));
    return -1;
  }
  TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);

  /* If any of the stdio file descriptors is valid and not associated
     with /dev/null, dup /dev/null to it. */
  for (i = 0; i < 3; i++) {
    /* If it is /dev/null already, we are done.
*/ 1525 if (i == dev_null) { 1526 continue; 1527 } 1528 1529 TRACE("[ Nullifying stdio file descriptor %d]", i); 1530 status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL)); 1531 1532 /* If file is opened, we are good. */ 1533 if (status != -1) { 1534 continue; 1535 } 1536 1537 /* The only error we allow is that the file descriptor does not 1538 exist, in which case we dup /dev/null to it. */ 1539 if (errno != EBADF) { 1540 DL_ERR("fcntl failed: %s", strerror(errno)); 1541 return_value = -1; 1542 continue; 1543 } 1544 1545 /* Try dupping /dev/null to this stdio file descriptor and 1546 repeat if there is a signal. Note that any errors in closing 1547 the stdio descriptor are lost. */ 1548 status = TEMP_FAILURE_RETRY(dup2(dev_null, i)); 1549 if (status < 0) { 1550 DL_ERR("dup2 failed: %s", strerror(errno)); 1551 return_value = -1; 1552 continue; 1553 } 1554 } 1555 1556 /* If /dev/null is not one of the stdio file descriptors, close it. */ 1557 if (dev_null > 2) { 1558 TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null); 1559 status = TEMP_FAILURE_RETRY(close(dev_null)); 1560 if (status == -1) { 1561 DL_ERR("close failed: %s", strerror(errno)); 1562 return_value = -1; 1563 } 1564 } 1565 1566 return return_value; 1567} 1568 1569static bool soinfo_link_image(soinfo* si) { 1570 /* "base" might wrap around UINT32_MAX. 
*/ 1571 ElfW(Addr) base = si->load_bias; 1572 const ElfW(Phdr)* phdr = si->phdr; 1573 int phnum = si->phnum; 1574 bool relocating_linker = (si->flags & FLAG_LINKER) != 0; 1575 1576 /* We can't debug anything until the linker is relocated */ 1577 if (!relocating_linker) { 1578 INFO("[ linking %s ]", si->name); 1579 DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(si->base), si->flags); 1580 } 1581 1582 /* Extract dynamic section */ 1583 size_t dynamic_count; 1584 ElfW(Word) dynamic_flags; 1585 phdr_table_get_dynamic_section(phdr, phnum, base, &si->dynamic, 1586 &dynamic_count, &dynamic_flags); 1587 if (si->dynamic == NULL) { 1588 if (!relocating_linker) { 1589 DL_ERR("missing PT_DYNAMIC in \"%s\"", si->name); 1590 } 1591 return false; 1592 } else { 1593 if (!relocating_linker) { 1594 DEBUG("dynamic = %p", si->dynamic); 1595 } 1596 } 1597 1598#if defined(__arm__) 1599 (void) phdr_table_get_arm_exidx(phdr, phnum, base, 1600 &si->ARM_exidx, &si->ARM_exidx_count); 1601#endif 1602 1603 // Extract useful information from dynamic section. 
1604 uint32_t needed_count = 0; 1605 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) { 1606 DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p", 1607 d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val)); 1608 switch (d->d_tag) { 1609 case DT_HASH: 1610 si->nbucket = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr)[0]; 1611 si->nchain = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr)[1]; 1612 si->bucket = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr + 8); 1613 si->chain = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr + 8 + si->nbucket * 4); 1614 break; 1615 case DT_STRTAB: 1616 si->strtab = reinterpret_cast<const char*>(base + d->d_un.d_ptr); 1617 break; 1618 case DT_SYMTAB: 1619 si->symtab = reinterpret_cast<ElfW(Sym)*>(base + d->d_un.d_ptr); 1620 break; 1621#if !defined(__LP64__) 1622 case DT_PLTREL: 1623 if (d->d_un.d_val != DT_REL) { 1624 DL_ERR("unsupported DT_RELA in \"%s\"", si->name); 1625 return false; 1626 } 1627 break; 1628#endif 1629 case DT_JMPREL: 1630#if defined(USE_RELA) 1631 si->plt_rela = reinterpret_cast<ElfW(Rela)*>(base + d->d_un.d_ptr); 1632#else 1633 si->plt_rel = reinterpret_cast<ElfW(Rel)*>(base + d->d_un.d_ptr); 1634#endif 1635 break; 1636 case DT_PLTRELSZ: 1637#if defined(USE_RELA) 1638 si->plt_rela_count = d->d_un.d_val / sizeof(ElfW(Rela)); 1639#else 1640 si->plt_rel_count = d->d_un.d_val / sizeof(ElfW(Rel)); 1641#endif 1642 break; 1643#if defined(__mips__) 1644 case DT_PLTGOT: 1645 // Used by mips and mips64. 1646 si->plt_got = reinterpret_cast<ElfW(Addr)**>(base + d->d_un.d_ptr); 1647 break; 1648#endif 1649 case DT_DEBUG: 1650 // Set the DT_DEBUG entry to the address of _r_debug for GDB 1651 // if the dynamic table is writable 1652// FIXME: not working currently for N64 1653// The flags for the LOAD and DYNAMIC program headers do not agree. 1654// The LOAD section containng the dynamic table has been mapped as 1655// read-only, but the DYNAMIC header claims it is writable. 
1656#if !(defined(__mips__) && defined(__LP64__)) 1657 if ((dynamic_flags & PF_W) != 0) { 1658 d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug); 1659 } 1660 break; 1661#endif 1662#if defined(USE_RELA) 1663 case DT_RELA: 1664 si->rela = reinterpret_cast<ElfW(Rela)*>(base + d->d_un.d_ptr); 1665 break; 1666 case DT_RELASZ: 1667 si->rela_count = d->d_un.d_val / sizeof(ElfW(Rela)); 1668 break; 1669 case DT_REL: 1670 DL_ERR("unsupported DT_REL in \"%s\"", si->name); 1671 return false; 1672 case DT_RELSZ: 1673 DL_ERR("unsupported DT_RELSZ in \"%s\"", si->name); 1674 return false; 1675#else 1676 case DT_REL: 1677 si->rel = reinterpret_cast<ElfW(Rel)*>(base + d->d_un.d_ptr); 1678 break; 1679 case DT_RELSZ: 1680 si->rel_count = d->d_un.d_val / sizeof(ElfW(Rel)); 1681 break; 1682 case DT_RELA: 1683 DL_ERR("unsupported DT_RELA in \"%s\"", si->name); 1684 return false; 1685#endif 1686 case DT_INIT: 1687 si->init_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr); 1688 DEBUG("%s constructors (DT_INIT) found at %p", si->name, si->init_func); 1689 break; 1690 case DT_FINI: 1691 si->fini_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr); 1692 DEBUG("%s destructors (DT_FINI) found at %p", si->name, si->fini_func); 1693 break; 1694 case DT_INIT_ARRAY: 1695 si->init_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr); 1696 DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", si->name, si->init_array); 1697 break; 1698 case DT_INIT_ARRAYSZ: 1699 si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr)); 1700 break; 1701 case DT_FINI_ARRAY: 1702 si->fini_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr); 1703 DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", si->name, si->fini_array); 1704 break; 1705 case DT_FINI_ARRAYSZ: 1706 si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr)); 1707 break; 1708 case DT_PREINIT_ARRAY: 1709 si->preinit_array = reinterpret_cast<linker_function_t*>(base + 
d->d_un.d_ptr); 1710 DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", si->name, si->preinit_array); 1711 break; 1712 case DT_PREINIT_ARRAYSZ: 1713 si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr)); 1714 break; 1715 case DT_TEXTREL: 1716#if defined(__LP64__) 1717 DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", si->name); 1718 return false; 1719#else 1720 si->has_text_relocations = true; 1721 break; 1722#endif 1723 case DT_SYMBOLIC: 1724 si->has_DT_SYMBOLIC = true; 1725 break; 1726 case DT_NEEDED: 1727 ++needed_count; 1728 break; 1729 case DT_FLAGS: 1730 if (d->d_un.d_val & DF_TEXTREL) { 1731#if defined(__LP64__) 1732 DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", si->name); 1733 return false; 1734#else 1735 si->has_text_relocations = true; 1736#endif 1737 } 1738 if (d->d_un.d_val & DF_SYMBOLIC) { 1739 si->has_DT_SYMBOLIC = true; 1740 } 1741 break; 1742#if defined(__mips__) 1743 case DT_STRSZ: 1744 case DT_SYMENT: 1745 case DT_RELENT: 1746 break; 1747 case DT_MIPS_RLD_MAP: 1748 // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB. 1749 { 1750 r_debug** dp = reinterpret_cast<r_debug**>(base + d->d_un.d_ptr); 1751 *dp = &_r_debug; 1752 } 1753 break; 1754 case DT_MIPS_RLD_VERSION: 1755 case DT_MIPS_FLAGS: 1756 case DT_MIPS_BASE_ADDRESS: 1757 case DT_MIPS_UNREFEXTNO: 1758 break; 1759 1760 case DT_MIPS_SYMTABNO: 1761 si->mips_symtabno = d->d_un.d_val; 1762 break; 1763 1764 case DT_MIPS_LOCAL_GOTNO: 1765 si->mips_local_gotno = d->d_un.d_val; 1766 break; 1767 1768 case DT_MIPS_GOTSYM: 1769 si->mips_gotsym = d->d_un.d_val; 1770 break; 1771#endif 1772 1773 default: 1774 DEBUG("Unused DT entry: type %p arg %p", 1775 reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val)); 1776 break; 1777 } 1778 } 1779 1780 DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p", 1781 reinterpret_cast<void*>(si->base), si->strtab, si->symtab); 1782 1783 // Sanity checks. 
1784 if (relocating_linker && needed_count != 0) { 1785 DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries"); 1786 return false; 1787 } 1788 if (si->nbucket == 0) { 1789 DL_ERR("empty/missing DT_HASH in \"%s\" (built with --hash-style=gnu?)", si->name); 1790 return false; 1791 } 1792 if (si->strtab == 0) { 1793 DL_ERR("empty/missing DT_STRTAB in \"%s\"", si->name); 1794 return false; 1795 } 1796 if (si->symtab == 0) { 1797 DL_ERR("empty/missing DT_SYMTAB in \"%s\"", si->name); 1798 return false; 1799 } 1800 1801 // If this is the main executable, then load all of the libraries from LD_PRELOAD now. 1802 if (si->flags & FLAG_EXE) { 1803 memset(gLdPreloads, 0, sizeof(gLdPreloads)); 1804 size_t preload_count = 0; 1805 for (size_t i = 0; gLdPreloadNames[i] != NULL; i++) { 1806 soinfo* lsi = find_library(gLdPreloadNames[i]); 1807 if (lsi != NULL) { 1808 gLdPreloads[preload_count++] = lsi; 1809 } else { 1810 // As with glibc, failure to load an LD_PRELOAD library is just a warning. 
1811 DL_WARN("could not load library \"%s\" from LD_PRELOAD for \"%s\"; caused by %s", 1812 gLdPreloadNames[i], si->name, linker_get_error_buffer()); 1813 } 1814 } 1815 } 1816 1817 soinfo** needed = reinterpret_cast<soinfo**>(alloca((1 + needed_count) * sizeof(soinfo*))); 1818 soinfo** pneeded = needed; 1819 1820 for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) { 1821 if (d->d_tag == DT_NEEDED) { 1822 const char* library_name = si->strtab + d->d_un.d_val; 1823 DEBUG("%s needs %s", si->name, library_name); 1824 soinfo* lsi = find_library(library_name); 1825 if (lsi == NULL) { 1826 strlcpy(tmp_err_buf, linker_get_error_buffer(), sizeof(tmp_err_buf)); 1827 DL_ERR("could not load library \"%s\" needed by \"%s\"; caused by %s", 1828 library_name, si->name, tmp_err_buf); 1829 return false; 1830 } 1831 *pneeded++ = lsi; 1832 } 1833 } 1834 *pneeded = NULL; 1835 1836#if !defined(__LP64__) 1837 if (si->has_text_relocations) { 1838 // Make segments writable to allow text relocations to work properly. We will later call 1839 // phdr_table_protect_segments() after all of them are applied and all constructors are run. 1840 DL_WARN("%s has text relocations. This is wasting memory and prevents " 1841 "security hardening. 
Please fix.", si->name); 1842 if (phdr_table_unprotect_segments(si->phdr, si->phnum, si->load_bias) < 0) { 1843 DL_ERR("can't unprotect loadable segments for \"%s\": %s", 1844 si->name, strerror(errno)); 1845 return false; 1846 } 1847 } 1848#endif 1849 1850#if defined(USE_RELA) 1851 if (si->plt_rela != NULL) { 1852 DEBUG("[ relocating %s plt ]\n", si->name); 1853 if (soinfo_relocate(si, si->plt_rela, si->plt_rela_count, needed)) { 1854 return false; 1855 } 1856 } 1857 if (si->rela != NULL) { 1858 DEBUG("[ relocating %s ]\n", si->name); 1859 if (soinfo_relocate(si, si->rela, si->rela_count, needed)) { 1860 return false; 1861 } 1862 } 1863#else 1864 if (si->plt_rel != NULL) { 1865 DEBUG("[ relocating %s plt ]", si->name); 1866 if (soinfo_relocate(si, si->plt_rel, si->plt_rel_count, needed)) { 1867 return false; 1868 } 1869 } 1870 if (si->rel != NULL) { 1871 DEBUG("[ relocating %s ]", si->name); 1872 if (soinfo_relocate(si, si->rel, si->rel_count, needed)) { 1873 return false; 1874 } 1875 } 1876#endif 1877 1878#if defined(__mips__) 1879 if (!mips_relocate_got(si, needed)) { 1880 return false; 1881 } 1882#endif 1883 1884 si->flags |= FLAG_LINKED; 1885 DEBUG("[ finished linking %s ]", si->name); 1886 1887#if !defined(__LP64__) 1888 if (si->has_text_relocations) { 1889 // All relocations are done, we can protect our segments back to read-only. 1890 if (phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias) < 0) { 1891 DL_ERR("can't protect segments for \"%s\": %s", 1892 si->name, strerror(errno)); 1893 return false; 1894 } 1895 } 1896#endif 1897 1898 /* We can also turn on GNU RELRO protection */ 1899 if (phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias) < 0) { 1900 DL_ERR("can't enable GNU RELRO protection for \"%s\": %s", 1901 si->name, strerror(errno)); 1902 return false; 1903 } 1904 1905 notify_gdb_of_load(si); 1906 return true; 1907} 1908 1909/* 1910 * This function add vdso to internal dso list. 
 * It helps stack unwinding through signal handlers.
 * Also, it makes bionic more like glibc.
 */
static void add_vdso(KernelArgumentBlock& args __unused) {
#if defined(AT_SYSINFO_EHDR)
  // The kernel maps the vdso and advertises its ELF header via the aux vector.
  ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
  if (ehdr_vdso == NULL) {
    // No vdso on this kernel/arch; nothing to register.
    return;
  }

  soinfo* si = soinfo_alloc("[vdso]");

  si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
  si->phnum = ehdr_vdso->e_phnum;
  si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
  si->size = phdr_table_get_load_size(si->phdr, si->phnum);
  si->flags = 0;
  si->load_bias = get_elf_exec_load_bias(ehdr_vdso);

  // NOTE(review): the return value of soinfo_link_image() is deliberately
  // ignored here; the vdso is best-effort and already mapped by the kernel.
  soinfo_link_image(si);
#endif
}

/*
 * This code is called after the linker has linked itself and
 * fixed it's own GOT. It is safe to make references to externs
 * and other non-local data at this point.
 */
static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
  /* NOTE: we store the args pointer on a special location
   * of the temporary TLS area in order to pass it to
   * the C Library's runtime initializer.
   *
   * The initializer must clear the slot and reset the TLS
   * to point to a different location to ensure that no other
   * shared library constructor can access it.
   */
  __libc_init_tls(args);

#if TIMING
  struct timeval t0, t1;
  gettimeofday(&t0, 0);
#endif

  // Initialize environment functions, and get to the ELF aux vectors table.
  linker_env_init(args);

  // If this is a setuid/setgid program, close the security hole described in
  // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
  if (get_AT_SECURE()) {
    nullify_closed_stdio();
  }

  debuggerd_init();

  // Get a few environment variables.
1967 const char* LD_DEBUG = linker_env_get("LD_DEBUG"); 1968 if (LD_DEBUG != NULL) { 1969 gLdDebugVerbosity = atoi(LD_DEBUG); 1970 } 1971 1972 // Normally, these are cleaned by linker_env_init, but the test 1973 // doesn't cost us anything. 1974 const char* ldpath_env = NULL; 1975 const char* ldpreload_env = NULL; 1976 if (!get_AT_SECURE()) { 1977 ldpath_env = linker_env_get("LD_LIBRARY_PATH"); 1978 ldpreload_env = linker_env_get("LD_PRELOAD"); 1979 } 1980 1981 INFO("[ android linker & debugger ]"); 1982 1983 soinfo* si = soinfo_alloc(args.argv[0]); 1984 if (si == NULL) { 1985 exit(EXIT_FAILURE); 1986 } 1987 1988 /* bootstrap the link map, the main exe always needs to be first */ 1989 si->flags |= FLAG_EXE; 1990 link_map* map = &(si->link_map_head); 1991 1992 map->l_addr = 0; 1993 map->l_name = args.argv[0]; 1994 map->l_prev = NULL; 1995 map->l_next = NULL; 1996 1997 _r_debug.r_map = map; 1998 r_debug_tail = map; 1999 2000 /* gdb expects the linker to be in the debug shared object list. 2001 * Without this, gdb has trouble locating the linker's ".text" 2002 * and ".plt" sections. Gdb could also potentially use this to 2003 * relocate the offset of our exported 'rtld_db_dlactivity' symbol. 2004 * Don't use soinfo_alloc(), because the linker shouldn't 2005 * be on the soinfo list. 2006 */ 2007 { 2008 static soinfo linker_soinfo; 2009#if defined(__LP64__) 2010 strlcpy(linker_soinfo.name, "/system/bin/linker64", sizeof(linker_soinfo.name)); 2011#else 2012 strlcpy(linker_soinfo.name, "/system/bin/linker", sizeof(linker_soinfo.name)); 2013#endif 2014 linker_soinfo.flags = 0; 2015 linker_soinfo.base = linker_base; 2016 2017 /* 2018 * Set the dynamic field in the link map otherwise gdb will complain with 2019 * the following: 2020 * warning: .dynamic section for "/system/bin/linker" is not at the 2021 * expected address (wrong library or version mismatch?) 
2022 */ 2023 ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base); 2024 ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_base + elf_hdr->e_phoff); 2025 phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base, 2026 &linker_soinfo.dynamic, NULL, NULL); 2027 insert_soinfo_into_debug_map(&linker_soinfo); 2028 } 2029 2030 // Extract information passed from the kernel. 2031 si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR)); 2032 si->phnum = args.getauxval(AT_PHNUM); 2033 si->entry = args.getauxval(AT_ENTRY); 2034 2035 /* Compute the value of si->base. We can't rely on the fact that 2036 * the first entry is the PHDR because this will not be true 2037 * for certain executables (e.g. some in the NDK unit test suite) 2038 */ 2039 si->base = 0; 2040 si->size = phdr_table_get_load_size(si->phdr, si->phnum); 2041 si->load_bias = 0; 2042 for (size_t i = 0; i < si->phnum; ++i) { 2043 if (si->phdr[i].p_type == PT_PHDR) { 2044 si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr; 2045 si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset; 2046 break; 2047 } 2048 } 2049 si->dynamic = NULL; 2050 si->ref_count = 1; 2051 2052 // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid). 2053 parse_LD_LIBRARY_PATH(ldpath_env); 2054 parse_LD_PRELOAD(ldpreload_env); 2055 2056 somain = si; 2057 2058 if (!soinfo_link_image(si)) { 2059 __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer()); 2060 exit(EXIT_FAILURE); 2061 } 2062 2063 add_vdso(args); 2064 2065 si->CallPreInitConstructors(); 2066 2067 for (size_t i = 0; gLdPreloads[i] != NULL; ++i) { 2068 gLdPreloads[i]->CallConstructors(); 2069 } 2070 2071 /* After the link_image, the si->load_bias is initialized. 2072 * For so lib, the map->l_addr will be updated in notify_gdb_of_load. 2073 * We need to update this value for so exe here. 
So Unwind_Backtrace 2074 * for some arch like x86 could work correctly within so exe. 2075 */ 2076 map->l_addr = si->load_bias; 2077 si->CallConstructors(); 2078 2079#if TIMING 2080 gettimeofday(&t1, NULL); 2081 PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) ( 2082 (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) - 2083 (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec))); 2084#endif 2085#if STATS 2086 PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0], 2087 linker_stats.count[kRelocAbsolute], 2088 linker_stats.count[kRelocRelative], 2089 linker_stats.count[kRelocCopy], 2090 linker_stats.count[kRelocSymbol]); 2091#endif 2092#if COUNT_PAGES 2093 { 2094 unsigned n; 2095 unsigned i; 2096 unsigned count = 0; 2097 for (n = 0; n < 4096; n++) { 2098 if (bitmask[n]) { 2099 unsigned x = bitmask[n]; 2100#if defined(__LP64__) 2101 for (i = 0; i < 32; i++) { 2102#else 2103 for (i = 0; i < 8; i++) { 2104#endif 2105 if (x & 1) { 2106 count++; 2107 } 2108 x >>= 1; 2109 } 2110 } 2111 } 2112 PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4); 2113 } 2114#endif 2115 2116#if TIMING || STATS || COUNT_PAGES 2117 fflush(stdout); 2118#endif 2119 2120 TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry)); 2121 return si->entry; 2122} 2123 2124/* Compute the load-bias of an existing executable. This shall only 2125 * be used to compute the load bias of an executable or shared library 2126 * that was loaded by the kernel itself. 2127 * 2128 * Input: 2129 * elf -> address of ELF header, assumed to be at the start of the file. 2130 * Return: 2131 * load bias, i.e. add the value of any p_vaddr in the file to get 2132 * the corresponding address in memory. 
 */
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
  ElfW(Addr) offset = elf->e_phoff;
  const ElfW(Phdr)* phdr_table = reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + offset);
  const ElfW(Phdr)* phdr_end = phdr_table + elf->e_phnum;

  for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_end; phdr++) {
    if (phdr->p_type == PT_LOAD) {
      // Bias = actual mapped address of the first PT_LOAD segment minus the
      // address it asked for in the file (p_vaddr).
      return reinterpret_cast<ElfW(Addr)>(elf) + phdr->p_offset - phdr->p_vaddr;
    }
  }
  // No PT_LOAD segment found; treat the image as unbiased.
  return 0;
}

/*
 * This is the entry point for the linker, called from begin.S. This
 * method is responsible for fixing the linker's own relocations, and
 * then calling __linker_init_post_relocation().
 *
 * Because this method is called before the linker has fixed its own
 * relocations, any attempt to reference an extern variable, extern
 * function, or other GOT reference will generate a segfault.
 */
extern "C" ElfW(Addr) __linker_init(void* raw_args) {
  KernelArgumentBlock args(raw_args);

  // AT_BASE is the address at which the kernel mapped the linker itself.
  ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
  ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
  ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff);

  // Build a stack-local soinfo describing the linker so we can relocate
  // ourselves with the same machinery used for ordinary libraries.
  soinfo linker_so;
  memset(&linker_so, 0, sizeof(soinfo));

  strcpy(linker_so.name, "[dynamic linker]");
  linker_so.base = linker_addr;
  linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
  linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
  linker_so.dynamic = NULL;
  linker_so.phdr = phdr;
  linker_so.phnum = elf_hdr->e_phnum;
  linker_so.flags |= FLAG_LINKER;

  if (!soinfo_link_image(&linker_so)) {
    // It would be nice to print an error message, but if the linker
    // can't link itself, there's no guarantee that we'll be able to
    // call write() (because it involves a GOT reference). We may as
    // well try though...
    const char* msg = "CANNOT LINK EXECUTABLE: ";
    write(2, msg, strlen(msg));
    write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
    write(2, "\n", 1);
    _exit(EXIT_FAILURE);
  }

  // We have successfully fixed our own relocations. It's safe to run
  // the main part of the linker now.
  args.abort_message_ptr = &gAbortMessage;
  ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);

  // Lock down the soinfo pool pages now that linking is complete.
  set_soinfo_pool_protection(PROT_READ);

  // Return the address that the calling assembly stub should jump to.
  return start_address;
}