// linker.cpp revision a4aafd156068ee174012f28cd894dbecf0e4ab90
1/* 2 * Copyright (C) 2008, 2009 The Android Open Source Project 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * * Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * * Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the 13 * distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29#include <dlfcn.h> 30#include <errno.h> 31#include <fcntl.h> 32#include <linux/auxvec.h> 33#include <pthread.h> 34#include <stdint.h> 35#include <stdio.h> 36#include <stdlib.h> 37#include <string.h> 38#include <sys/atomics.h> 39#include <sys/mman.h> 40#include <sys/stat.h> 41#include <unistd.h> 42 43// Private C library headers. 
44#include "private/bionic_tls.h" 45#include "private/KernelArgumentBlock.h" 46#include "private/ScopedPthreadMutexLocker.h" 47 48#include "linker.h" 49#include "linker_debug.h" 50#include "linker_environ.h" 51#include "linker_phdr.h" 52 53/* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<< 54 * 55 * Do NOT use malloc() and friends or pthread_*() code here. 56 * Don't use printf() either; it's caused mysterious memory 57 * corruption in the past. 58 * The linker runs before we bring up libc and it's easiest 59 * to make sure it does not depend on any complex libc features 60 * 61 * open issues / todo: 62 * 63 * - are we doing everything we should for ARM_COPY relocations? 64 * - cleaner error reporting 65 * - after linking, set as much stuff as possible to READONLY 66 * and NOEXEC 67 */ 68 69static bool soinfo_link_image(soinfo* si); 70static Elf_Addr get_elf_exec_load_bias(const Elf_Ehdr* elf); 71 72// We can't use malloc(3) in the dynamic linker. We use a linked list of anonymous 73// maps, each a single page in size. The pages are broken up into as many struct soinfo 74// objects as will fit, and they're all threaded together on a free list. 
75#define SOINFO_PER_POOL ((PAGE_SIZE - sizeof(soinfo_pool_t*)) / sizeof(soinfo)) 76struct soinfo_pool_t { 77 soinfo_pool_t* next; 78 soinfo info[SOINFO_PER_POOL]; 79}; 80static struct soinfo_pool_t* gSoInfoPools = NULL; 81static soinfo* gSoInfoFreeList = NULL; 82 83static soinfo* solist = &libdl_info; 84static soinfo* sonext = &libdl_info; 85static soinfo* somain; /* main process, always the one after libdl_info */ 86 87static const char* const gDefaultLdPaths[] = { 88#if defined(__LP64__) 89 "/vendor/lib64", 90 "/system/lib64", 91#else 92 "/vendor/lib", 93 "/system/lib", 94#endif 95 NULL 96}; 97 98#define LDPATH_BUFSIZE (LDPATH_MAX*64) 99#define LDPATH_MAX 8 100 101#define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64) 102#define LDPRELOAD_MAX 8 103 104static char gLdPathsBuffer[LDPATH_BUFSIZE]; 105static const char* gLdPaths[LDPATH_MAX + 1]; 106 107static char gLdPreloadsBuffer[LDPRELOAD_BUFSIZE]; 108static const char* gLdPreloadNames[LDPRELOAD_MAX + 1]; 109 110static soinfo* gLdPreloads[LDPRELOAD_MAX + 1]; 111 112__LIBC_HIDDEN__ int gLdDebugVerbosity; 113 114__LIBC_HIDDEN__ abort_msg_t* gAbortMessage = NULL; // For debuggerd. 
115 116enum RelocationKind { 117 kRelocAbsolute = 0, 118 kRelocRelative, 119 kRelocCopy, 120 kRelocSymbol, 121 kRelocMax 122}; 123 124#if STATS 125struct linker_stats_t { 126 int count[kRelocMax]; 127}; 128 129static linker_stats_t linker_stats; 130 131static void count_relocation(RelocationKind kind) { 132 ++linker_stats.count[kind]; 133} 134#else 135static void count_relocation(RelocationKind) { 136} 137#endif 138 139#if COUNT_PAGES 140static unsigned bitmask[4096]; 141#if defined(__LP64__) 142#define MARK(offset) \ 143 do { \ 144 if ((((offset) >> 12) >> 5) < 4096) \ 145 bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \ 146 } while(0) 147#else 148#define MARK(offset) \ 149 do { \ 150 bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \ 151 } while(0) 152#endif 153#else 154#define MARK(x) do {} while (0) 155#endif 156 157// You shouldn't try to call memory-allocating functions in the dynamic linker. 158// Guard against the most obvious ones. 159#define DISALLOW_ALLOCATION(return_type, name, ...) \ 160 return_type name __VA_ARGS__ \ 161 { \ 162 const char* msg = "ERROR: " #name " called from the dynamic linker!\n"; \ 163 __libc_format_log(ANDROID_LOG_FATAL, "linker", "%s", msg); \ 164 write(2, msg, strlen(msg)); \ 165 abort(); \ 166 } 167#define UNUSED __attribute__((unused)) 168DISALLOW_ALLOCATION(void*, malloc, (size_t u UNUSED)); 169DISALLOW_ALLOCATION(void, free, (void* u UNUSED)); 170DISALLOW_ALLOCATION(void*, realloc, (void* u1 UNUSED, size_t u2 UNUSED)); 171DISALLOW_ALLOCATION(void*, calloc, (size_t u1 UNUSED, size_t u2 UNUSED)); 172 173static char tmp_err_buf[768]; 174static char __linker_dl_err_buf[768]; 175 176char* linker_get_error_buffer() { 177 return &__linker_dl_err_buf[0]; 178} 179 180size_t linker_get_error_buffer_size() { 181 return sizeof(__linker_dl_err_buf); 182} 183 184/* 185 * This function is an empty stub where GDB locates a breakpoint to get notified 186 * about linker activity. 
187 */ 188extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity(); 189 190static r_debug _r_debug = {1, NULL, &rtld_db_dlactivity, RT_CONSISTENT, 0}; 191static link_map_t* r_debug_tail = 0; 192 193static pthread_mutex_t gDebugMutex = PTHREAD_MUTEX_INITIALIZER; 194 195static void insert_soinfo_into_debug_map(soinfo * info) { 196 // Copy the necessary fields into the debug structure. 197 link_map_t* map = &(info->link_map); 198 map->l_addr = info->load_bias; 199 map->l_name = (char*) info->name; 200 map->l_ld = (uintptr_t)info->dynamic; 201 202 /* Stick the new library at the end of the list. 203 * gdb tends to care more about libc than it does 204 * about leaf libraries, and ordering it this way 205 * reduces the back-and-forth over the wire. 206 */ 207 if (r_debug_tail) { 208 r_debug_tail->l_next = map; 209 map->l_prev = r_debug_tail; 210 map->l_next = 0; 211 } else { 212 _r_debug.r_map = map; 213 map->l_prev = 0; 214 map->l_next = 0; 215 } 216 r_debug_tail = map; 217} 218 219static void remove_soinfo_from_debug_map(soinfo* info) { 220 link_map_t* map = &(info->link_map); 221 222 if (r_debug_tail == map) { 223 r_debug_tail = map->l_prev; 224 } 225 226 if (map->l_prev) { 227 map->l_prev->l_next = map->l_next; 228 } 229 if (map->l_next) { 230 map->l_next->l_prev = map->l_prev; 231 } 232} 233 234static void notify_gdb_of_load(soinfo* info) { 235 if (info->flags & FLAG_EXE) { 236 // GDB already knows about the main executable 237 return; 238 } 239 240 ScopedPthreadMutexLocker locker(&gDebugMutex); 241 242 _r_debug.r_state = RT_ADD; 243 rtld_db_dlactivity(); 244 245 insert_soinfo_into_debug_map(info); 246 247 _r_debug.r_state = RT_CONSISTENT; 248 rtld_db_dlactivity(); 249} 250 251static void notify_gdb_of_unload(soinfo* info) { 252 if (info->flags & FLAG_EXE) { 253 // GDB already knows about the main executable 254 return; 255 } 256 257 ScopedPthreadMutexLocker locker(&gDebugMutex); 258 259 _r_debug.r_state = RT_DELETE; 260 
rtld_db_dlactivity(); 261 262 remove_soinfo_from_debug_map(info); 263 264 _r_debug.r_state = RT_CONSISTENT; 265 rtld_db_dlactivity(); 266} 267 268void notify_gdb_of_libraries() { 269 _r_debug.r_state = RT_ADD; 270 rtld_db_dlactivity(); 271 _r_debug.r_state = RT_CONSISTENT; 272 rtld_db_dlactivity(); 273} 274 275static bool ensure_free_list_non_empty() { 276 if (gSoInfoFreeList != NULL) { 277 return true; 278 } 279 280 // Allocate a new pool. 281 soinfo_pool_t* pool = reinterpret_cast<soinfo_pool_t*>(mmap(NULL, sizeof(*pool), 282 PROT_READ|PROT_WRITE, 283 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0)); 284 if (pool == MAP_FAILED) { 285 return false; 286 } 287 288 // Add the pool to our list of pools. 289 pool->next = gSoInfoPools; 290 gSoInfoPools = pool; 291 292 // Chain the entries in the new pool onto the free list. 293 gSoInfoFreeList = &pool->info[0]; 294 soinfo* next = NULL; 295 for (int i = SOINFO_PER_POOL - 1; i >= 0; --i) { 296 pool->info[i].next = next; 297 next = &pool->info[i]; 298 } 299 300 return true; 301} 302 303static void set_soinfo_pool_protection(int protection) { 304 for (soinfo_pool_t* p = gSoInfoPools; p != NULL; p = p->next) { 305 if (mprotect(p, sizeof(*p), protection) == -1) { 306 abort(); // Can't happen. 307 } 308 } 309} 310 311static soinfo* soinfo_alloc(const char* name) { 312 if (strlen(name) >= SOINFO_NAME_LEN) { 313 DL_ERR("library name \"%s\" too long", name); 314 return NULL; 315 } 316 317 if (!ensure_free_list_non_empty()) { 318 DL_ERR("out of memory when loading \"%s\"", name); 319 return NULL; 320 } 321 322 // Take the head element off the free list. 323 soinfo* si = gSoInfoFreeList; 324 gSoInfoFreeList = gSoInfoFreeList->next; 325 326 // Initialize the new element. 
327 memset(si, 0, sizeof(soinfo)); 328 strlcpy(si->name, name, sizeof(si->name)); 329 sonext->next = si; 330 sonext = si; 331 332 TRACE("name %s: allocated soinfo @ %p", name, si); 333 return si; 334} 335 336static void soinfo_free(soinfo* si) 337{ 338 if (si == NULL) { 339 return; 340 } 341 342 soinfo *prev = NULL, *trav; 343 344 TRACE("name %s: freeing soinfo @ %p", si->name, si); 345 346 for (trav = solist; trav != NULL; trav = trav->next) { 347 if (trav == si) 348 break; 349 prev = trav; 350 } 351 if (trav == NULL) { 352 /* si was not in solist */ 353 DL_ERR("name \"%s\" is not in solist!", si->name); 354 return; 355 } 356 357 /* prev will never be NULL, because the first entry in solist is 358 always the static libdl_info. 359 */ 360 prev->next = si->next; 361 if (si == sonext) { 362 sonext = prev; 363 } 364 si->next = gSoInfoFreeList; 365 gSoInfoFreeList = si; 366} 367 368 369static void parse_path(const char* path, const char* delimiters, 370 const char** array, char* buf, size_t buf_size, size_t max_count) { 371 if (path == NULL) { 372 return; 373 } 374 375 size_t len = strlcpy(buf, path, buf_size); 376 377 size_t i = 0; 378 char* buf_p = buf; 379 while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) { 380 if (*array[i] != '\0') { 381 ++i; 382 } 383 } 384 385 // Forget the last path if we had to truncate; this occurs if the 2nd to 386 // last char isn't '\0' (i.e. wasn't originally a delimiter). 387 if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') { 388 array[i - 1] = NULL; 389 } else { 390 array[i] = NULL; 391 } 392} 393 394static void parse_LD_LIBRARY_PATH(const char* path) { 395 parse_path(path, ":", gLdPaths, 396 gLdPathsBuffer, sizeof(gLdPathsBuffer), LDPATH_MAX); 397} 398 399static void parse_LD_PRELOAD(const char* path) { 400 // We have historically supported ':' as well as ' ' in LD_PRELOAD. 
401 parse_path(path, " :", gLdPreloadNames, 402 gLdPreloadsBuffer, sizeof(gLdPreloadsBuffer), LDPRELOAD_MAX); 403} 404 405#if defined(__arm__) 406 407/* For a given PC, find the .so that it belongs to. 408 * Returns the base address of the .ARM.exidx section 409 * for that .so, and the number of 8-byte entries 410 * in that section (via *pcount). 411 * 412 * Intended to be called by libc's __gnu_Unwind_Find_exidx(). 413 * 414 * This function is exposed via dlfcn.cpp and libdl.so. 415 */ 416_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int *pcount) 417{ 418 soinfo *si; 419 unsigned addr = (unsigned)pc; 420 421 for (si = solist; si != 0; si = si->next) { 422 if ((addr >= si->base) && (addr < (si->base + si->size))) { 423 *pcount = si->ARM_exidx_count; 424 return (_Unwind_Ptr)si->ARM_exidx; 425 } 426 } 427 *pcount = 0; 428 return NULL; 429} 430 431#endif 432 433/* Here, we only have to provide a callback to iterate across all the 434 * loaded libraries. gcc_eh does the rest. */ 435int 436dl_iterate_phdr(int (*cb)(dl_phdr_info *info, size_t size, void *data), 437 void *data) 438{ 439 int rv = 0; 440 for (soinfo* si = solist; si != NULL; si = si->next) { 441 dl_phdr_info dl_info; 442 dl_info.dlpi_addr = si->link_map.l_addr; 443 dl_info.dlpi_name = si->link_map.l_name; 444 dl_info.dlpi_phdr = si->phdr; 445 dl_info.dlpi_phnum = si->phnum; 446 rv = cb(&dl_info, sizeof(dl_phdr_info), data); 447 if (rv != 0) { 448 break; 449 } 450 } 451 return rv; 452} 453 454static Elf_Sym* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) { 455 Elf_Sym* symtab = si->symtab; 456 const char* strtab = si->strtab; 457 458 TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd", 459 name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket); 460 461 for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) { 462 Elf_Sym* s = symtab + n; 463 if (strcmp(strtab + s->st_name, name)) continue; 464 465 /* only concern ourselves with global and weak symbol 
definitions */ 466 switch (ELF_ST_BIND(s->st_info)) { 467 case STB_GLOBAL: 468 case STB_WEAK: 469 if (s->st_shndx == SHN_UNDEF) { 470 continue; 471 } 472 473 TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd", 474 name, si->name, reinterpret_cast<void*>(s->st_value), 475 static_cast<size_t>(s->st_size)); 476 return s; 477 } 478 } 479 480 return NULL; 481} 482 483static unsigned elfhash(const char* _name) { 484 const unsigned char* name = (const unsigned char*) _name; 485 unsigned h = 0, g; 486 487 while(*name) { 488 h = (h << 4) + *name++; 489 g = h & 0xf0000000; 490 h ^= g; 491 h ^= g >> 24; 492 } 493 return h; 494} 495 496static Elf_Sym* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) { 497 unsigned elf_hash = elfhash(name); 498 Elf_Sym* s = NULL; 499 500 if (si != NULL && somain != NULL) { 501 502 /* 503 * Local scope is executable scope. Just start looking into it right away 504 * for the shortcut. 505 */ 506 507 if (si == somain) { 508 s = soinfo_elf_lookup(si, elf_hash, name); 509 if (s != NULL) { 510 *lsi = si; 511 goto done; 512 } 513 } else { 514 /* Order of symbol lookup is controlled by DT_SYMBOLIC flag */ 515 516 /* 517 * If this object was built with symbolic relocations disabled, the 518 * first place to look to resolve external references is the main 519 * executable. 520 */ 521 522 if (!si->has_DT_SYMBOLIC) { 523 DEBUG("%s: looking up %s in executable %s", 524 si->name, name, somain->name); 525 s = soinfo_elf_lookup(somain, elf_hash, name); 526 if (s != NULL) { 527 *lsi = somain; 528 goto done; 529 } 530 } 531 532 /* Look for symbols in the local scope (the object who is 533 * searching). This happens with C++ templates on x86 for some 534 * reason. 535 * 536 * Notes on weak symbols: 537 * The ELF specs are ambiguous about treatment of weak definitions in 538 * dynamic linking. Some systems return the first definition found 539 * and some the first non-weak definition. This is system dependent. 
540 * Here we return the first definition found for simplicity. */ 541 542 s = soinfo_elf_lookup(si, elf_hash, name); 543 if (s != NULL) { 544 *lsi = si; 545 goto done; 546 } 547 548 /* 549 * If this object was built with -Bsymbolic and symbol is not found 550 * in the local scope, try to find the symbol in the main executable. 551 */ 552 553 if (si->has_DT_SYMBOLIC) { 554 DEBUG("%s: looking up %s in executable %s after local scope", 555 si->name, name, somain->name); 556 s = soinfo_elf_lookup(somain, elf_hash, name); 557 if (s != NULL) { 558 *lsi = somain; 559 goto done; 560 } 561 } 562 } 563 } 564 565 /* Next, look for it in the preloads list */ 566 for (int i = 0; gLdPreloads[i] != NULL; i++) { 567 s = soinfo_elf_lookup(gLdPreloads[i], elf_hash, name); 568 if (s != NULL) { 569 *lsi = gLdPreloads[i]; 570 goto done; 571 } 572 } 573 574 for (int i = 0; needed[i] != NULL; i++) { 575 DEBUG("%s: looking up %s in %s", 576 si->name, name, needed[i]->name); 577 s = soinfo_elf_lookup(needed[i], elf_hash, name); 578 if (s != NULL) { 579 *lsi = needed[i]; 580 goto done; 581 } 582 } 583 584done: 585 if (s != NULL) { 586 TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, " 587 "found in %s, base = %p, load bias = %p", 588 si->name, name, reinterpret_cast<void*>(s->st_value), 589 (*lsi)->name, reinterpret_cast<void*>((*lsi)->base), 590 reinterpret_cast<void*>((*lsi)->load_bias)); 591 return s; 592 } 593 594 return NULL; 595} 596 597/* This is used by dlsym(3). It performs symbol lookup only within the 598 specified soinfo object and not in any of its dependencies. 599 600 TODO: Only looking in the specified soinfo seems wrong. dlsym(3) says 601 that it should do a breadth first search through the dependency 602 tree. This agrees with the ELF spec (aka System V Application 603 Binary Interface) where in Chapter 5 it discuss resolving "Shared 604 Object Dependencies" in breadth first search order. 
605 */ 606Elf_Sym* dlsym_handle_lookup(soinfo* si, const char* name) { 607 return soinfo_elf_lookup(si, elfhash(name), name); 608} 609 610/* This is used by dlsym(3) to performs a global symbol lookup. If the 611 start value is null (for RTLD_DEFAULT), the search starts at the 612 beginning of the global solist. Otherwise the search starts at the 613 specified soinfo (for RTLD_NEXT). 614 */ 615Elf_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) { 616 unsigned elf_hash = elfhash(name); 617 618 if (start == NULL) { 619 start = solist; 620 } 621 622 Elf_Sym* s = NULL; 623 for (soinfo* si = start; (s == NULL) && (si != NULL); si = si->next) { 624 s = soinfo_elf_lookup(si, elf_hash, name); 625 if (s != NULL) { 626 *found = si; 627 break; 628 } 629 } 630 631 if (s != NULL) { 632 TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p", 633 name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base)); 634 } 635 636 return s; 637} 638 639soinfo* find_containing_library(const void* p) { 640 Elf_Addr address = reinterpret_cast<Elf_Addr>(p); 641 for (soinfo* si = solist; si != NULL; si = si->next) { 642 if (address >= si->base && address - si->base < si->size) { 643 return si; 644 } 645 } 646 return NULL; 647} 648 649Elf_Sym* dladdr_find_symbol(soinfo* si, const void* addr) { 650 Elf_Addr soaddr = reinterpret_cast<Elf_Addr>(addr) - si->base; 651 652 // Search the library's symbol table for any defined symbol which 653 // contains this address. 
654 for (size_t i = 0; i < si->nchain; ++i) { 655 Elf_Sym* sym = &si->symtab[i]; 656 if (sym->st_shndx != SHN_UNDEF && 657 soaddr >= sym->st_value && 658 soaddr < sym->st_value + sym->st_size) { 659 return sym; 660 } 661 } 662 663 return NULL; 664} 665 666#if 0 667static void dump(soinfo* si) 668{ 669 Elf_Sym* s = si->symtab; 670 for (unsigned n = 0; n < si->nchain; n++) { 671 TRACE("%04d> %08x: %02x %04x %08x %08x %s", n, s, 672 s->st_info, s->st_shndx, s->st_value, s->st_size, 673 si->strtab + s->st_name); 674 s++; 675 } 676} 677#endif 678 679static int open_library_on_path(const char* name, const char* const paths[]) { 680 char buf[512]; 681 for (size_t i = 0; paths[i] != NULL; ++i) { 682 int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name); 683 if (n < 0 || n >= static_cast<int>(sizeof(buf))) { 684 PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name); 685 continue; 686 } 687 int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC)); 688 if (fd != -1) { 689 return fd; 690 } 691 } 692 return -1; 693} 694 695static int open_library(const char* name) { 696 TRACE("[ opening %s ]", name); 697 698 // If the name contains a slash, we should attempt to open it directly and not search the paths. 699 if (strchr(name, '/') != NULL) { 700 int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC)); 701 if (fd != -1) { 702 return fd; 703 } 704 // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now. 705 } 706 707 // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths. 708 int fd = open_library_on_path(name, gLdPaths); 709 if (fd == -1) { 710 fd = open_library_on_path(name, gDefaultLdPaths); 711 } 712 return fd; 713} 714 715static soinfo* load_library(const char* name) { 716 // Open the file. 717 int fd = open_library(name); 718 if (fd == -1) { 719 DL_ERR("library \"%s\" not found", name); 720 return NULL; 721 } 722 723 // Read the ELF header and load the segments. 
724 ElfReader elf_reader(name, fd); 725 if (!elf_reader.Load()) { 726 return NULL; 727 } 728 729 const char* bname = strrchr(name, '/'); 730 soinfo* si = soinfo_alloc(bname ? bname + 1 : name); 731 if (si == NULL) { 732 return NULL; 733 } 734 si->base = elf_reader.load_start(); 735 si->size = elf_reader.load_size(); 736 si->load_bias = elf_reader.load_bias(); 737 si->flags = 0; 738 si->entry = 0; 739 si->dynamic = NULL; 740 si->phnum = elf_reader.phdr_count(); 741 si->phdr = elf_reader.loaded_phdr(); 742 return si; 743} 744 745static soinfo *find_loaded_library(const char *name) 746{ 747 soinfo *si; 748 const char *bname; 749 750 // TODO: don't use basename only for determining libraries 751 // http://code.google.com/p/android/issues/detail?id=6670 752 753 bname = strrchr(name, '/'); 754 bname = bname ? bname + 1 : name; 755 756 for (si = solist; si != NULL; si = si->next) { 757 if (!strcmp(bname, si->name)) { 758 return si; 759 } 760 } 761 return NULL; 762} 763 764static soinfo* find_library_internal(const char* name) { 765 if (name == NULL) { 766 return somain; 767 } 768 769 soinfo* si = find_loaded_library(name); 770 if (si != NULL) { 771 if (si->flags & FLAG_LINKED) { 772 return si; 773 } 774 DL_ERR("OOPS: recursive link to \"%s\"", si->name); 775 return NULL; 776 } 777 778 TRACE("[ '%s' has not been loaded yet. Locating...]", name); 779 si = load_library(name); 780 if (si == NULL) { 781 return NULL; 782 } 783 784 // At this point we know that whatever is loaded @ base is a valid ELF 785 // shared library whose segments are properly mapped in. 
786 TRACE("[ init_library base=%p sz=0x%08x name='%s' ]", 787 reinterpret_cast<void*>(si->base), si->size, si->name); 788 789 if (!soinfo_link_image(si)) { 790 munmap(reinterpret_cast<void*>(si->base), si->size); 791 soinfo_free(si); 792 return NULL; 793 } 794 795 return si; 796} 797 798static soinfo* find_library(const char* name) { 799 soinfo* si = find_library_internal(name); 800 if (si != NULL) { 801 si->ref_count++; 802 } 803 return si; 804} 805 806static int soinfo_unload(soinfo* si) { 807 if (si->ref_count == 1) { 808 TRACE("unloading '%s'", si->name); 809 si->CallDestructors(); 810 811 for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) { 812 if (d->d_tag == DT_NEEDED) { 813 const char* library_name = si->strtab + d->d_un.d_val; 814 TRACE("%s needs to unload %s", si->name, library_name); 815 soinfo_unload(find_loaded_library(library_name)); 816 } 817 } 818 819 munmap(reinterpret_cast<void*>(si->base), si->size); 820 notify_gdb_of_unload(si); 821 soinfo_free(si); 822 si->ref_count = 0; 823 } else { 824 si->ref_count--; 825 TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count); 826 } 827 return 0; 828} 829 830void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) { 831 snprintf(buffer, buffer_size, "%s:%s", gDefaultLdPaths[0], gDefaultLdPaths[1]); 832} 833 834void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) { 835 if (!get_AT_SECURE()) { 836 parse_LD_LIBRARY_PATH(ld_library_path); 837 } 838} 839 840soinfo* do_dlopen(const char* name, int flags) { 841 if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL)) != 0) { 842 DL_ERR("invalid flags to dlopen: %x", flags); 843 return NULL; 844 } 845 set_soinfo_pool_protection(PROT_READ | PROT_WRITE); 846 soinfo* si = find_library(name); 847 if (si != NULL) { 848 si->CallConstructors(); 849 } 850 set_soinfo_pool_protection(PROT_READ); 851 return si; 852} 853 854int do_dlclose(soinfo* si) { 855 set_soinfo_pool_protection(PROT_READ | PROT_WRITE); 856 
int result = soinfo_unload(si); 857 set_soinfo_pool_protection(PROT_READ); 858 return result; 859} 860 861#if defined(USE_RELA) 862static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo* needed[]) { 863 Elf_Sym* symtab = si->symtab; 864 const char* strtab = si->strtab; 865 Elf_Sym* s; 866 Elf_Rela* start = rela; 867 soinfo* lsi; 868 869 for (size_t idx = 0; idx < count; ++idx, ++rela) { 870 unsigned type = ELF_R_TYPE(rela->r_info); 871 unsigned sym = ELF_R_SYM(rela->r_info); 872 Elf_Addr reloc = static_cast<Elf_Addr>(rela->r_offset + si->load_bias); 873 Elf_Addr sym_addr = 0; 874 char* sym_name = NULL; 875 876 DEBUG("Processing '%s' relocation at index %zd", si->name, idx); 877 if (type == 0) { // R_*_NONE 878 continue; 879 } 880 if (sym != 0) { 881 sym_name = (char *)(strtab + symtab[sym].st_name); 882 s = soinfo_do_lookup(si, sym_name, &lsi, needed); 883 if (s == NULL) { 884 // We only allow an undefined symbol if this is a weak reference... 885 s = &symtab[sym]; 886 if (ELF_ST_BIND(s->st_info) != STB_WEAK) { 887 DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name); 888 return -1; 889 } 890 891 /* IHI0044C AAELF 4.5.1.1: 892 893 Libraries are not searched to resolve weak references. 894 It is not an error for a weak reference to remain unsatisfied. 895 896 During linking, the value of an undefined weak reference is: 897 - Zero if the relocation type is absolute 898 - The address of the place if the relocation is pc-relative 899 - The address of nominal base address if the relocation 900 type is base-relative. 901 */ 902 903 switch (type) { 904#if defined(__aarch64__) 905 case R_AARCH64_JUMP_SLOT: 906 case R_AARCH64_GLOB_DAT: 907 case R_AARCH64_ABS64: 908 case R_AARCH64_ABS32: 909 case R_AARCH64_ABS16: 910 case R_AARCH64_RELATIVE: 911 /* 912 * The sym_addr was initialized to be zero above, or the relocation 913 * code below does not care about value of sym_addr. 914 * No need to do anything. 
915 */ 916 break; 917#elif defined(__x86_64__) 918 case R_X86_64_JUMP_SLOT: 919 case R_X86_64_GLOB_DAT: 920 case R_X86_64_32: 921 case R_X86_64_RELATIVE: 922 // No need to do anything. 923 break; 924 case R_X86_64_PC32: 925 sym_addr = reloc; 926 break; 927#endif 928 default: 929 DL_ERR("unknown weak reloc type %d @ %p (%d)", type, rela, (int) (rela - start)); 930 return -1; 931 } 932 } else { 933 // We got a definition. 934 sym_addr = static_cast<Elf_Addr>(s->st_value + lsi->load_bias); 935 } 936 count_relocation(kRelocSymbol); 937 } else { 938 s = NULL; 939 } 940 941 switch (type) { 942#if defined(__aarch64__) 943 case R_AARCH64_JUMP_SLOT: 944 count_relocation(kRelocAbsolute); 945 MARK(rela->r_offset); 946 TRACE_TYPE(RELO, "RELO JMP_SLOT %16lx <- %16lx %s\n", 947 reloc, 948 (sym_addr + rela->r_addend), 949 sym_name); 950 *reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend); 951 break; 952 case R_AARCH64_GLOB_DAT: 953 count_relocation(kRelocAbsolute); 954 MARK(rela->r_offset); 955 TRACE_TYPE(RELO, "RELO GLOB_DAT %16lx <- %16lx %s\n", 956 reloc, 957 (sym_addr + rela->r_addend), 958 sym_name); 959 *reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend); 960 break; 961 case R_AARCH64_ABS64: 962 count_relocation(kRelocAbsolute); 963 MARK(rela->r_offset); 964 TRACE_TYPE(RELO, "RELO ABS64 %16lx <- %16lx %s\n", 965 reloc, 966 (sym_addr + rela->r_addend), 967 sym_name); 968 *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend); 969 break; 970 case R_AARCH64_ABS32: 971 count_relocation(kRelocAbsolute); 972 MARK(rela->r_offset); 973 TRACE_TYPE(RELO, "RELO ABS32 %16lx <- %16lx %s\n", 974 reloc, 975 (sym_addr + rela->r_addend), 976 sym_name); 977 if ((static_cast<Elf_Addr>(INT32_MIN) <= 978 (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) && 979 ((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <= 980 static_cast<Elf_Addr>(UINT32_MAX))) { 981 *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + 
rela->r_addend);
      } else {
        DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
               (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
               static_cast<Elf_Addr>(INT32_MIN),
               static_cast<Elf_Addr>(UINT32_MAX));
        return -1;
      }
      break;
    case R_AARCH64_ABS16:
      // 16-bit absolute relocation: the result S + A must fit in
      // [INT16_MIN, UINT16_MAX] or we refuse to link.
      count_relocation(kRelocAbsolute);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO ABS16 %16lx <- %16lx %s\n",
                 reloc,
                 (sym_addr + rela->r_addend),
                 sym_name);
      if ((static_cast<Elf_Addr>(INT16_MIN) <=
           (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) &&
          ((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <=
           static_cast<Elf_Addr>(UINT16_MAX))) {
        *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
      } else {
        DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
               (*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
               static_cast<Elf_Addr>(INT16_MIN),
               static_cast<Elf_Addr>(UINT16_MAX));
        return -1;
      }
      break;
    case R_AARCH64_PREL64:
      // 64-bit PC-relative relocation: S + A - P; full width, so no range check.
      count_relocation(kRelocRelative);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO REL64 %16lx <- %16lx - %16lx %s\n",
                 reloc,
                 (sym_addr + rela->r_addend),
                 rela->r_offset,
                 sym_name);
      *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
      break;
    case R_AARCH64_PREL32:
      // 32-bit PC-relative relocation: S + A - P, range-checked.
      count_relocation(kRelocRelative);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO REL32 %16lx <- %16lx - %16lx %s\n",
                 reloc,
                 (sym_addr + rela->r_addend),
                 rela->r_offset, sym_name);
      if ((static_cast<Elf_Addr>(INT32_MIN) <=
           (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
          ((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
           static_cast<Elf_Addr>(UINT32_MAX))) {
        *reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
      } else {
        DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
               (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
               static_cast<Elf_Addr>(INT32_MIN),
               static_cast<Elf_Addr>(UINT32_MAX));
        return -1;
      }
      break;
    case R_AARCH64_PREL16:
      // 16-bit PC-relative relocation: S + A - P, range-checked.
      count_relocation(kRelocRelative);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO REL16 %16lx <- %16lx - %16lx %s\n",
                 reloc,
                 (sym_addr + rela->r_addend),
                 rela->r_offset, sym_name);
      if ((static_cast<Elf_Addr>(INT16_MIN) <=
           (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
          ((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
           static_cast<Elf_Addr>(UINT16_MAX))) {
        *reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
      } else {
        DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
               (*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
               static_cast<Elf_Addr>(INT16_MIN),
               static_cast<Elf_Addr>(UINT16_MAX));
        return -1;
      }
      break;

    case R_AARCH64_RELATIVE:
      // B + A: base-relative relocation; must not reference a symbol.
      count_relocation(kRelocRelative);
      MARK(rela->r_offset);
      if (sym) {
        DL_ERR("odd RELATIVE form...");
        return -1;
      }
      TRACE_TYPE(RELO, "RELO RELATIVE %16lx <- %16lx\n",
                 reloc,
                 (si->base + rela->r_addend));
      *reinterpret_cast<Elf_Addr*>(reloc) = (si->base + rela->r_addend);
      break;

    case R_AARCH64_COPY:
      if ((si->flags & FLAG_EXE) == 0) {
        /*
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_AARCH64_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         *
         * FLAG_EXE is set for both ET_DYN and ET_EXEC executables.
         * We should explicitly disallow ET_DYN executables from having
         * R_AARCH64_COPY relocations.
         */
        DL_ERR("%s R_AARCH64_COPY relocations only supported for ET_EXEC", si->name);
        return -1;
      }
      count_relocation(kRelocCopy);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO COPY %16lx <- %ld @ %16lx %s\n",
                 reloc,
                 s->st_size,
                 (sym_addr + rela->r_addend),
                 sym_name);
      if (reloc == (sym_addr + rela->r_addend)) {
        // Re-resolve the symbol, skipping this executable (NULL caller), to
        // find the shared-library definition we must copy the data from.
        Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);

        if (src == NULL) {
          DL_ERR("%s R_AARCH64_COPY relocation source cannot be resolved", si->name);
          return -1;
        }
        if (lsi->has_DT_SYMBOLIC) {
          DL_ERR("%s invalid R_AARCH64_COPY relocation against DT_SYMBOLIC shared "
                 "library %s (built with -Bsymbolic?)", si->name, lsi->name);
          return -1;
        }
        if (s->st_size < src->st_size) {
          // Destination in the executable must be large enough for the source.
          DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%ld < %ld)",
                 si->name, s->st_size, src->st_size);
          return -1;
        }
        memcpy((void*)reloc, (void*)(src->st_value + lsi->load_bias), src->st_size);
      } else {
        DL_ERR("%s R_AARCH64_COPY relocation target cannot be resolved", si->name);
        return -1;
      }
      break;
    case R_AARCH64_TLS_TPREL64:
      // TLS relocations are only traced here, not applied.
      TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16lx <- %16lx - %16lx\n",
                 reloc,
                 (sym_addr + rela->r_addend),
                 rela->r_offset);
      break;
    case R_AARCH64_TLS_DTPREL32:
      TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16lx <- %16lx - %16lx\n",
                 reloc,
                 (sym_addr + rela->r_addend),
                 rela->r_offset);
      break;
#elif defined(__x86_64__)
    case R_X86_64_JUMP_SLOT:
      count_relocation(kRelocAbsolute);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                 static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
      *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
      break;
    case R_X86_64_GLOB_DAT:
      count_relocation(kRelocAbsolute);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                 static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
      *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
      break;
    case R_X86_64_RELATIVE:
      count_relocation(kRelocRelative);
      MARK(rela->r_offset);
      if (sym) {
        DL_ERR("odd RELATIVE form...");
        return -1;
      }
      TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
                 static_cast<size_t>(si->base));
      *reinterpret_cast<Elf_Addr*>(reloc) = si->base + rela->r_addend;
      break;
    case R_X86_64_32:
      count_relocation(kRelocRelative);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                 static_cast<size_t>(sym_addr), sym_name);
      *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
      break;
    case R_X86_64_64:
      count_relocation(kRelocRelative);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                 static_cast<size_t>(sym_addr), sym_name);
      *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
      break;
    case R_X86_64_PC32:
      // PC-relative: S + A - P, where P is the address being relocated.
      count_relocation(kRelocRelative);
      MARK(rela->r_offset);
      TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
                 static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
                 static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
      *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend - reloc;
      break;
#endif

    default:
      DL_ERR("unknown reloc type %d @ %p (%d)", type, rela, (int) (rela - start));
      return -1;
    }
  }
  return 0;
}
#else
// Applies REL-format relocations (no explicit addend; used by the 32-bit
// ABIs: ARM, x86 and MIPS). Returns 0 on success, -1 on any unresolved
// strong symbol or unsupported relocation type.
static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
                           soinfo* needed[])
{
    Elf_Sym*
symtab = si->symtab;
    const char* strtab = si->strtab;
    Elf_Sym* s;
    Elf_Rel* start = rel;
    soinfo* lsi;

    for (size_t idx = 0; idx < count; ++idx, ++rel) {
        unsigned type = ELF_R_TYPE(rel->r_info);
        // TODO: don't use unsigned for 'sym'. Use uint32_t or Elf_Addr instead.
        unsigned sym = ELF_R_SYM(rel->r_info);
        // 'reloc' is the run-time address of the word being patched.
        Elf_Addr reloc = static_cast<Elf_Addr>(rel->r_offset + si->load_bias);
        Elf_Addr sym_addr = 0;
        char* sym_name = NULL;

        DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
        if (type == 0) { // R_*_NONE
            continue;
        }
        if (sym != 0) {
            sym_name = (char *)(strtab + symtab[sym].st_name);
            s = soinfo_do_lookup(si, sym_name, &lsi, needed);
            if (s == NULL) {
                // We only allow an undefined symbol if this is a weak reference...
                s = &symtab[sym];
                if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
                    DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name);
                    return -1;
                }

                /* IHI0044C AAELF 4.5.1.1:

                   Libraries are not searched to resolve weak references.
                   It is not an error for a weak reference to remain
                   unsatisfied.

                   During linking, the value of an undefined weak reference is:
                   - Zero if the relocation type is absolute
                   - The address of the place if the relocation is pc-relative
                   - The address of nominal base address if the relocation
                     type is base-relative.
                */

                switch (type) {
#if defined(__arm__)
                case R_ARM_JUMP_SLOT:
                case R_ARM_GLOB_DAT:
                case R_ARM_ABS32:
                case R_ARM_RELATIVE:    /* Don't care. */
                    // sym_addr was initialized to be zero above or relocation
                    // code below does not care about value of sym_addr.
                    // No need to do anything.
                    break;
#elif defined(__i386__)
                case R_386_JMP_SLOT:
                case R_386_GLOB_DAT:
                case R_386_32:
                case R_386_RELATIVE:    /* Don't care. */
                    // sym_addr was initialized to be zero above or relocation
                    // code below does not care about value of sym_addr.
                    // No need to do anything.
                    break;
                case R_386_PC32:
                    // PC-relative: resolving to the place itself makes the
                    // applied value (sym_addr - reloc) zero.
                    sym_addr = reloc;
                    break;
#endif

#if defined(__arm__)
                case R_ARM_COPY:
                    // Fall through. Can't really copy if weak symbol is not found at run-time.
#endif
                default:
                    DL_ERR("unknown weak reloc type %d @ %p (%d)", type, rel, (int) (rel - start));
                    return -1;
                }
            } else {
                // We got a definition.
                sym_addr = static_cast<Elf_Addr>(s->st_value + lsi->load_bias);
            }
            count_relocation(kRelocSymbol);
        } else {
            s = NULL;
        }

        switch (type) {
#if defined(__arm__)
        case R_ARM_JUMP_SLOT:
            count_relocation(kRelocAbsolute);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
            *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
            break;
        case R_ARM_GLOB_DAT:
            count_relocation(kRelocAbsolute);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
            *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
            break;
        case R_ARM_ABS32:
            // S + A: the implicit addend is the value already in place.
            count_relocation(kRelocAbsolute);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
            *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
            break;
        case R_ARM_REL32:
            // S + A - P (P taken as the unrelocated r_offset here).
            count_relocation(kRelocRelative);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
                       reloc, sym_addr, rel->r_offset, sym_name);
            *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr - rel->r_offset;
            break;
        case R_ARM_COPY:
            if ((si->flags & FLAG_EXE) == 0) {
                /*
                 * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
                 *
                 * Section 4.7.1.10 "Dynamic relocations"
                 * R_ARM_COPY may only appear in executable objects where e_type is
                 * set to ET_EXEC.
                 *
                 * TODO: FLAG_EXE is set for both ET_DYN and ET_EXEC executables.
                 * We should explicitly disallow ET_DYN executables from having
                 * R_ARM_COPY relocations.
                 */
                DL_ERR("%s R_ARM_COPY relocations only supported for ET_EXEC", si->name);
                return -1;
            }
            count_relocation(kRelocCopy);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO %08x <- %d @ %08x %s", reloc, s->st_size, sym_addr, sym_name);
            if (reloc == sym_addr) {
                // Re-resolve, skipping the executable itself, to find the
                // shared-library definition that supplies the data.
                Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);

                if (src == NULL) {
                    DL_ERR("%s R_ARM_COPY relocation source cannot be resolved", si->name);
                    return -1;
                }
                if (lsi->has_DT_SYMBOLIC) {
                    DL_ERR("%s invalid R_ARM_COPY relocation against DT_SYMBOLIC shared "
                           "library %s (built with -Bsymbolic?)", si->name, lsi->name);
                    return -1;
                }
                if (s->st_size < src->st_size) {
                    DL_ERR("%s R_ARM_COPY relocation size mismatch (%d < %d)",
                           si->name, s->st_size, src->st_size);
                    return -1;
                }
                memcpy((void*)reloc, (void*)(src->st_value + lsi->load_bias), src->st_size);
            } else {
                DL_ERR("%s R_ARM_COPY relocation target cannot be resolved", si->name);
                return -1;
            }
            break;
#elif defined(__i386__)
        case R_386_JMP_SLOT:
            count_relocation(kRelocAbsolute);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
            *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
            break;
        case R_386_GLOB_DAT:
            count_relocation(kRelocAbsolute);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
            *reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
            break;
        case R_386_32:
            count_relocation(kRelocRelative);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
            *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
            break;
        case R_386_PC32:
            count_relocation(kRelocRelative);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
                       reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
            *reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr - reloc);
            break;
#elif defined(__mips__)
        case R_MIPS_REL32:
            // With a symbol: add the symbol's address; without: rebase by si->base.
            count_relocation(kRelocAbsolute);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x %s",
                       reloc, sym_addr, (sym_name) ? sym_name : "*SECTIONHDR*");
            if (s) {
                *reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
            } else {
                *reinterpret_cast<Elf_Addr*>(reloc) += si->base;
            }
            break;
#endif

#if defined(__arm__)
        case R_ARM_RELATIVE:
#elif defined(__i386__)
        case R_386_RELATIVE:
#endif
            count_relocation(kRelocRelative);
            MARK(rel->r_offset);
            if (sym) {
                DL_ERR("odd RELATIVE form...");
                return -1;
            }
            TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
                       reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(si->base));
            *reinterpret_cast<Elf_Addr*>(reloc) += si->base;
            break;

        default:
            DL_ERR("unknown reloc type %d @ %p (%d)", type, rel, (int) (rel - start));
            return -1;
        }
    }
    return 0;
}
#endif

#if defined(__mips__)
// Relocates the MIPS Global Offset Table: rebases the local entries by
// load_bias and resolves the global entries via symbol lookup.
// Returns false if a non-weak global symbol cannot be located.
static bool mips_relocate_got(soinfo* si, soinfo* needed[]) {
    unsigned* got = si->plt_got;
    if (got == NULL) {
        return true;
    }
    unsigned local_gotno = si->mips_local_gotno;
    unsigned gotsym = si->mips_gotsym;
    unsigned symtabno = si->mips_symtabno;
    Elf_Sym* symtab = si->symtab;

    /*
     * got[0] is address of lazy resolver function
     * got[1] may be used for a GNU extension
     * set it to a recognizable address in case someone calls it
     * (should be _rtld_bind_start)
     * FIXME: maybe this should be in a separate routine
     */

    if ((si->flags & FLAG_LINKER) == 0) {
        size_t g = 0;
        got[g++] = 0xdeadbeef;
        if (got[g] & 0x80000000) {
            got[g++] = 0xdeadfeed;
        }
        /*
         * Relocate the local GOT entries that need to be relocated.
         */
        for (; g < local_gotno; g++) {
            got[g] += si->load_bias;
        }
    }

    /* Now for the global GOT entries */
    Elf_Sym* sym = symtab + gotsym;
    got = si->plt_got + local_gotno;
    for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
        const char* sym_name;
        Elf_Sym* s;
        soinfo* lsi;

        /* This is an undefined reference... try to locate it */
        sym_name = si->strtab + sym->st_name;
        s = soinfo_do_lookup(si, sym_name, &lsi, needed);
        if (s == NULL) {
            /* We only allow an undefined symbol if this is a weak
               reference.. */
            s = &symtab[g];
            if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
                DL_ERR("cannot locate \"%s\"...", sym_name);
                return false;
            }
            // Undefined weak symbols resolve to zero.
            *got = 0;
        }
        else {
            /* FIXME: is this sufficient?
             * For reference see NetBSD link loader
             * http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
             */
            *got = lsi->load_bias + s->st_value;
        }
    }
    return true;
}
#endif

// Invokes each entry of an init/fini function array (DT_*_ARRAY) for this
// library. 'reverse' selects back-to-front iteration, as required for
// DT_FINI_ARRAY. A NULL array is silently ignored.
void soinfo::CallArray(const char* array_name UNUSED, linker_function_t* functions, size_t count, bool reverse) {
  if (functions == NULL) {
    return;
  }

  TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);

  int begin = reverse ? (count - 1) : 0;
  int end = reverse ? -1 : count;
  int step = reverse ? -1 : 1;

  for (int i = begin; i != end; i += step) {
    TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
    CallFunction("function", functions[i]);
  }

  TRACE("[ Done calling %s for '%s' ]", array_name, name);
}

// Invokes a single init/fini function. NULL and -1 entries are skipped;
// toolchains emit those as placeholders for "no function".
void soinfo::CallFunction(const char* function_name UNUSED, linker_function_t function) {
  if (function == NULL || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
    return;
  }

  TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
  function();
  TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);

  // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
  // are still writable. This happens with our debug malloc (see http://b/7941716).
  set_soinfo_pool_protection(PROT_READ | PROT_WRITE);
}

void soinfo::CallPreInitConstructors() {
  // DT_PREINIT_ARRAY functions are called before any other constructors for executables,
  // but ignored in a shared library.
  CallArray("DT_PREINIT_ARRAY", preinit_array, preinit_array_count, false);
}

// Runs DT_INIT/DT_INIT_ARRAY for this library, after first running the
// constructors of every DT_NEEDED dependency. Idempotent: subsequent calls
// are no-ops.
void soinfo::CallConstructors() {
  if (constructors_called) {
    return;
  }

  // We set constructors_called before actually calling the constructors, otherwise it doesn't
  // protect against recursive constructor calls. One simple example of constructor recursion
  // is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
  // 1. The program depends on libc, so libc's constructor is called here.
  // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
  // 3. dlopen() calls the constructors on the newly created
  //    soinfo for libc_malloc_debug_leak.so.
  // 4. The debug .so depends on libc, so CallConstructors is
  //    called again with the libc soinfo. If it doesn't trigger the early-
  //    out above, the libc constructor will be called again (recursively!).
  constructors_called = true;

  if ((flags & FLAG_EXE) == 0 && preinit_array != NULL) {
    // The GNU dynamic linker silently ignores these, but we warn the developer.
    PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
          name, preinit_array_count);
  }

  if (dynamic != NULL) {
    for (Elf_Dyn* d = dynamic; d->d_tag != DT_NULL; ++d) {
      if (d->d_tag == DT_NEEDED) {
        const char* library_name = strtab + d->d_un.d_val;
        TRACE("\"%s\": calling constructors in DT_NEEDED \"%s\"", name, library_name);
        // NOTE(review): assumes every DT_NEEDED library was loaded during
        // linking; find_loaded_library() returning NULL here would crash —
        // confirm that invariant holds on all paths.
        find_loaded_library(library_name)->CallConstructors();
      }
    }
  }

  TRACE("\"%s\": calling constructors", name);

  // DT_INIT should be called before DT_INIT_ARRAY if both are present.
  CallFunction("DT_INIT", init_func);
  CallArray("DT_INIT_ARRAY", init_array, init_array_count, false);
}

void soinfo::CallDestructors() {
  TRACE("\"%s\": calling destructors", name);

  // DT_FINI_ARRAY must be parsed in reverse order.
  CallArray("DT_FINI_ARRAY", fini_array, fini_array_count, true);

  // DT_FINI should be called after DT_FINI_ARRAY if both are present.
  CallFunction("DT_FINI", fini_func);
}

/* Force any of the closed stdin, stdout and stderr to be associated with
   /dev/null. */
static int nullify_closed_stdio() {
  int dev_null, i, status;
  int return_value = 0;

  dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
  if (dev_null < 0) {
    DL_ERR("cannot open /dev/null: %s", strerror(errno));
    return -1;
  }
  TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);

  /* If any of the stdio file descriptors is valid and not associated
     with /dev/null, dup /dev/null to it. */
  for (i = 0; i < 3; i++) {
    /* If it is /dev/null already, we are done. */
    if (i == dev_null) {
      continue;
    }

    TRACE("[ Nullifying stdio file descriptor %d]", i);
    status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL));

    /* If file is opened, we are good. */
    if (status != -1) {
      continue;
    }

    /* The only error we allow is that the file descriptor does not
       exist, in which case we dup /dev/null to it. */
    if (errno != EBADF) {
      DL_ERR("fcntl failed: %s", strerror(errno));
      return_value = -1;
      continue;
    }

    /* Try dupping /dev/null to this stdio file descriptor and
       repeat if there is a signal. Note that any errors in closing
       the stdio descriptor are lost. */
    status = TEMP_FAILURE_RETRY(dup2(dev_null, i));
    if (status < 0) {
      DL_ERR("dup2 failed: %s", strerror(errno));
      return_value = -1;
      continue;
    }
  }

  /* If /dev/null is not one of the stdio file descriptors, close it. */
  if (dev_null > 2) {
    TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null);
    status = TEMP_FAILURE_RETRY(close(dev_null));
    if (status == -1) {
      DL_ERR("close failed: %s", strerror(errno));
      return_value = -1;
    }
  }

  return return_value;
}

// Parses 'si's dynamic section, loads its DT_NEEDED dependencies, applies
// all relocations and enables GNU RELRO protection. Returns false (with the
// error recorded via DL_ERR) on failure.
static bool soinfo_link_image(soinfo* si) {
    /* "base" might wrap around UINT32_MAX.
     */
    Elf_Addr base = si->load_bias;
    const Elf_Phdr *phdr = si->phdr;
    int phnum = si->phnum;
    bool relocating_linker = (si->flags & FLAG_LINKER) != 0;

    /* We can't debug anything until the linker is relocated */
    if (!relocating_linker) {
        INFO("[ linking %s ]", si->name);
        DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(si->base), si->flags);
    }

    /* Extract dynamic section */
    size_t dynamic_count;
    Elf_Word dynamic_flags;
    phdr_table_get_dynamic_section(phdr, phnum, base, &si->dynamic,
                                   &dynamic_count, &dynamic_flags);
    if (si->dynamic == NULL) {
        if (!relocating_linker) {
            DL_ERR("missing PT_DYNAMIC in \"%s\"", si->name);
        }
        return false;
    } else {
        if (!relocating_linker) {
            DEBUG("dynamic = %p", si->dynamic);
        }
    }

#if defined(__arm__)
    (void) phdr_table_get_arm_exidx(phdr, phnum, base,
                                    &si->ARM_exidx, &si->ARM_exidx_count);
#endif

    // Extract useful information from dynamic section.
    uint32_t needed_count = 0;
    for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
        DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
              d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
        switch (d->d_tag) {
        case DT_HASH:
            // SysV hash table layout: [nbucket, nchain, buckets..., chains...],
            // hence the 8-byte header skip and the nbucket*4 bucket array.
            si->nbucket = ((unsigned *) (base + d->d_un.d_ptr))[0];
            si->nchain = ((unsigned *) (base + d->d_un.d_ptr))[1];
            si->bucket = (unsigned *) (base + d->d_un.d_ptr + 8);
            si->chain = (unsigned *) (base + d->d_un.d_ptr + 8 + si->nbucket * 4);
            break;
        case DT_STRTAB:
            si->strtab = (const char *) (base + d->d_un.d_ptr);
            break;
        case DT_SYMTAB:
            si->symtab = (Elf_Sym *) (base + d->d_un.d_ptr);
            break;
#if !defined(__LP64__)
        case DT_PLTREL:
            // 32-bit ABIs only support REL-format PLT relocations.
            if (d->d_un.d_val != DT_REL) {
                DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
                return false;
            }
            break;
#endif
        case DT_JMPREL:
#if defined(USE_RELA)
            si->plt_rela = (Elf_Rela*) (base + d->d_un.d_ptr);
#else
            si->plt_rel = (Elf_Rel*) (base + d->d_un.d_ptr);
#endif
            break;
        case DT_PLTRELSZ:
#if defined(USE_RELA)
            si->plt_rela_count = d->d_un.d_val / sizeof(Elf_Rela);
#else
            si->plt_rel_count = d->d_un.d_val / sizeof(Elf_Rel);
#endif
            break;
#if !defined(__LP64__)
        case DT_PLTGOT:
            // Used by 32-bit MIPS.
            si->plt_got = (unsigned *)(base + d->d_un.d_ptr);
            break;
#endif
        case DT_DEBUG:
            // Set the DT_DEBUG entry to the address of _r_debug for GDB
            // if the dynamic table is writable
            if ((dynamic_flags & PF_W) != 0) {
                d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
            }
            break;
#if defined(USE_RELA)
        case DT_RELA:
            si->rela = (Elf_Rela*) (base + d->d_un.d_ptr);
            break;
        case DT_RELASZ:
            si->rela_count = d->d_un.d_val / sizeof(Elf_Rela);
            break;
        case DT_REL:
            DL_ERR("unsupported DT_REL in \"%s\"", si->name);
            return false;
        case DT_RELSZ:
            DL_ERR("unsupported DT_RELSZ in \"%s\"", si->name);
            return false;
#else
        case DT_REL:
            si->rel = (Elf_Rel*) (base + d->d_un.d_ptr);
            break;
        case DT_RELSZ:
            si->rel_count = d->d_un.d_val / sizeof(Elf_Rel);
            break;
        case DT_RELA:
            DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
            return false;
#endif
        case DT_INIT:
            si->init_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr);
            DEBUG("%s constructors (DT_INIT) found at %p", si->name, si->init_func);
            break;
        case DT_FINI:
            si->fini_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr);
            DEBUG("%s destructors (DT_FINI) found at %p", si->name, si->fini_func);
            break;
        case DT_INIT_ARRAY:
            si->init_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
            DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", si->name, si->init_array);
            break;
        case DT_INIT_ARRAYSZ:
            si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
            break;
        case DT_FINI_ARRAY:
            si->fini_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
            DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", si->name, si->fini_array);
            break;
        case DT_FINI_ARRAYSZ:
            si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
            break;
        case DT_PREINIT_ARRAY:
            si->preinit_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
            DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", si->name, si->preinit_array);
            break;
        case DT_PREINIT_ARRAYSZ:
            si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
            break;
        case DT_TEXTREL:
#if defined(__LP64__)
            DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", si->name);
            return false;
#else
            si->has_text_relocations = true;
            break;
#endif
        case DT_SYMBOLIC:
            si->has_DT_SYMBOLIC = true;
            break;
        case DT_NEEDED:
            // Only counted here; the dependencies are loaded in a second pass.
            ++needed_count;
            break;
        case DT_FLAGS:
            if (d->d_un.d_val & DF_TEXTREL) {
#if defined(__LP64__)
                DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", si->name);
                return false;
#else
                si->has_text_relocations = true;
#endif
            }
            if (d->d_un.d_val & DF_SYMBOLIC) {
                si->has_DT_SYMBOLIC = true;
            }
            break;
#if defined(__mips__)
        case DT_STRSZ:
        case DT_SYMENT:
        case DT_RELENT:
            break;
        case DT_MIPS_RLD_MAP:
            // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
            {
                r_debug** dp = (r_debug**) d->d_un.d_ptr;
                *dp = &_r_debug;
            }
            break;
        case DT_MIPS_RLD_VERSION:
        case DT_MIPS_FLAGS:
        case DT_MIPS_BASE_ADDRESS:
        case DT_MIPS_UNREFEXTNO:
            break;

        case DT_MIPS_SYMTABNO:
            si->mips_symtabno = d->d_un.d_val;
            break;

        case DT_MIPS_LOCAL_GOTNO:
            si->mips_local_gotno = d->d_un.d_val;
            break;

        case DT_MIPS_GOTSYM:
            si->mips_gotsym = d->d_un.d_val;
            break;
#endif

        default:
            DEBUG("Unused DT entry: type %p arg %p",
                  reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
            break;
        }
    }

    DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p",
          reinterpret_cast<void*>(si->base), si->strtab, si->symtab);

    // Sanity checks.
    if (relocating_linker && needed_count != 0) {
        DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries");
        return false;
    }
    if (si->nbucket == 0) {
        DL_ERR("empty/missing DT_HASH in \"%s\" (built with --hash-style=gnu?)", si->name);
        return false;
    }
    if (si->strtab == 0) {
        DL_ERR("empty/missing DT_STRTAB in \"%s\"", si->name);
        return false;
    }
    if (si->symtab == 0) {
        DL_ERR("empty/missing DT_SYMTAB in \"%s\"", si->name);
        return false;
    }

    // If this is the main executable, then load all of the libraries from LD_PRELOAD now.
    if (si->flags & FLAG_EXE) {
        memset(gLdPreloads, 0, sizeof(gLdPreloads));
        size_t preload_count = 0;
        for (size_t i = 0; gLdPreloadNames[i] != NULL; i++) {
            soinfo* lsi = find_library(gLdPreloadNames[i]);
            if (lsi != NULL) {
                gLdPreloads[preload_count++] = lsi;
            } else {
                // As with glibc, failure to load an LD_PRELOAD library is just a warning.
                DL_WARN("could not load library \"%s\" from LD_PRELOAD for \"%s\"; caused by %s",
                        gLdPreloadNames[i], si->name, linker_get_error_buffer());
            }
        }
    }

    // Stack-allocated, NULL-terminated list of direct dependencies; sized
    // from the DT_NEEDED count gathered above.
    soinfo** needed = (soinfo**) alloca((1 + needed_count) * sizeof(soinfo*));
    soinfo** pneeded = needed;

    for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
        if (d->d_tag == DT_NEEDED) {
            const char* library_name = si->strtab + d->d_un.d_val;
            DEBUG("%s needs %s", si->name, library_name);
            soinfo* lsi = find_library(library_name);
            if (lsi == NULL) {
                strlcpy(tmp_err_buf, linker_get_error_buffer(), sizeof(tmp_err_buf));
                DL_ERR("could not load library \"%s\" needed by \"%s\"; caused by %s",
                       library_name, si->name, tmp_err_buf);
                return false;
            }
            *pneeded++ = lsi;
        }
    }
    *pneeded = NULL;

#if !defined(__LP64__)
    if (si->has_text_relocations) {
        // Make segments writable to allow text relocations to work properly. We will later call
        // phdr_table_protect_segments() after all of them are applied and all constructors are run.
        DL_WARN("%s has text relocations. This is wasting memory and prevents "
                "security hardening. Please fix.", si->name);
        if (phdr_table_unprotect_segments(si->phdr, si->phnum, si->load_bias) < 0) {
            DL_ERR("can't unprotect loadable segments for \"%s\": %s",
                   si->name, strerror(errno));
            return false;
        }
    }
#endif

#if defined(USE_RELA)
    // Note: the relocation routines return 0 on success, non-zero on error.
    if (si->plt_rela != NULL) {
        DEBUG("[ relocating %s plt ]\n", si->name );
        if (soinfo_relocate_a(si, si->plt_rela, si->plt_rela_count, needed)) {
            return false;
        }
    }
    if (si->rela != NULL) {
        DEBUG("[ relocating %s ]\n", si->name );
        if (soinfo_relocate_a(si, si->rela, si->rela_count, needed)) {
            return false;
        }
    }
#else
    if (si->plt_rel != NULL) {
        DEBUG("[ relocating %s plt ]", si->name );
        if (soinfo_relocate(si, si->plt_rel, si->plt_rel_count, needed)) {
            return false;
        }
    }
    if (si->rel != NULL) {
        DEBUG("[ relocating %s ]", si->name );
        if (soinfo_relocate(si, si->rel, si->rel_count, needed)) {
            return false;
        }
    }
#endif

#if defined(__mips__)
    if (!mips_relocate_got(si, needed)) {
        return false;
    }
#endif

    si->flags |= FLAG_LINKED;
    DEBUG("[ finished linking %s ]", si->name);

#if !defined(__LP64__)
    if (si->has_text_relocations) {
        // All relocations are done, we can protect our segments back to read-only.
        if (phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias) < 0) {
            DL_ERR("can't protect segments for \"%s\": %s",
                   si->name, strerror(errno));
            return false;
        }
    }
#endif

    /* We can also turn on GNU RELRO protection */
    if (phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias) < 0) {
        DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
               si->name, strerror(errno));
        return false;
    }

    notify_gdb_of_load(si);
    return true;
}

/*
 * This function adds the vdso to the internal dso list.
 * It helps with stack unwinding through signal handlers.
 * Also, it makes bionic more like glibc.
 */
static void add_vdso(KernelArgumentBlock& args UNUSED) {
#if defined(AT_SYSINFO_EHDR)
    // The kernel maps the vdso and passes its ELF header address via auxv.
    Elf_Ehdr* ehdr_vdso = reinterpret_cast<Elf_Ehdr*>(args.getauxval(AT_SYSINFO_EHDR));
    if (ehdr_vdso == NULL) {
        return;
    }

    soinfo* si = soinfo_alloc("[vdso]");

    si->phdr = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
    si->phnum = ehdr_vdso->e_phnum;
    si->base = reinterpret_cast<Elf_Addr>(ehdr_vdso);
    si->size = phdr_table_get_load_size(si->phdr, si->phnum);
    si->flags = 0;
    si->load_bias = get_elf_exec_load_bias(ehdr_vdso);

    soinfo_link_image(si);
#endif
}

/*
 * This code is called after the linker has linked itself and
 * fixed its own GOT. It is safe to make references to externs
 * and other non-local data at this point.
 */
static Elf_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf_Addr linker_base) {
    /* NOTE: we store the args pointer on a special location
     *       of the temporary TLS area in order to pass it to
     *       the C Library's runtime initializer.
     *
     *       The initializer must clear the slot and reset the TLS
     *       to point to a different location to ensure that no other
     *       shared library constructor can access it.
     */
    __libc_init_tls(args);

#if TIMING
    struct timeval t0, t1;
    gettimeofday(&t0, 0);
#endif

    // Initialize environment functions, and get to the ELF aux vectors table.
    linker_env_init(args);

    // If this is a setuid/setgid program, close the security hole described in
    // ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
    if (get_AT_SECURE()) {
        nullify_closed_stdio();
    }

    debuggerd_init();

    // Get a few environment variables.
    const char* LD_DEBUG = linker_env_get("LD_DEBUG");
    if (LD_DEBUG != NULL) {
        gLdDebugVerbosity = atoi(LD_DEBUG);
    }

    // Normally, these are cleaned by linker_env_init, but the test
    // doesn't cost us anything.
    const char* ldpath_env = NULL;
    const char* ldpreload_env = NULL;
    if (!get_AT_SECURE()) {
        ldpath_env = linker_env_get("LD_LIBRARY_PATH");
        ldpreload_env = linker_env_get("LD_PRELOAD");
    }

    INFO("[ android linker & debugger ]");

    soinfo* si = soinfo_alloc(args.argv[0]);
    if (si == NULL) {
        exit(EXIT_FAILURE);
    }

    /* bootstrap the link map, the main exe always needs to be first */
    si->flags |= FLAG_EXE;
    link_map_t* map = &(si->link_map);

    map->l_addr = 0;
    map->l_name = args.argv[0];
    map->l_prev = NULL;
    map->l_next = NULL;

    _r_debug.r_map = map;
    r_debug_tail = map;

    /* gdb expects the linker to be in the debug shared object list.
     * Without this, gdb has trouble locating the linker's ".text"
     * and ".plt" sections. Gdb could also potentially use this to
     * relocate the offset of our exported 'rtld_db_dlactivity' symbol.
     * Don't use soinfo_alloc(), because the linker shouldn't
     * be on the soinfo list.
     */
    {
        static soinfo linker_soinfo;
#if defined(__LP64__)
        strlcpy(linker_soinfo.name, "/system/bin/linker64", sizeof(linker_soinfo.name));
#else
        strlcpy(linker_soinfo.name, "/system/bin/linker", sizeof(linker_soinfo.name));
#endif
        linker_soinfo.flags = 0;
        linker_soinfo.base = linker_base;

        /*
         * Set the dynamic field in the link map otherwise gdb will complain with
         * the following:
         *   warning: .dynamic section for "/system/bin/linker" is not at the
         *   expected address (wrong library or version mismatch?)
         */
        Elf_Ehdr *elf_hdr = (Elf_Ehdr *) linker_base;
        Elf_Phdr *phdr = (Elf_Phdr*)((unsigned char*) linker_base + elf_hdr->e_phoff);
        phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
                                       &linker_soinfo.dynamic, NULL, NULL);
        insert_soinfo_into_debug_map(&linker_soinfo);
    }

    // Extract information passed from the kernel.
    si->phdr = reinterpret_cast<Elf_Phdr*>(args.getauxval(AT_PHDR));
    si->phnum = args.getauxval(AT_PHNUM);
    si->entry = args.getauxval(AT_ENTRY);

    /* Compute the value of si->base. We can't rely on the fact that
     * the first entry is the PHDR because this will not be true
     * for certain executables (e.g. some in the NDK unit test suite)
     */
    si->base = 0;
    si->size = phdr_table_get_load_size(si->phdr, si->phnum);
    si->load_bias = 0;
    for (size_t i = 0; i < si->phnum; ++i) {
        if (si->phdr[i].p_type == PT_PHDR) {
            si->load_bias = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_vaddr;
            si->base = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_offset;
            break;
        }
    }
    si->dynamic = NULL;
    si->ref_count = 1;

    // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
    parse_LD_LIBRARY_PATH(ldpath_env);
    parse_LD_PRELOAD(ldpreload_env);

    somain = si;

    if (!soinfo_link_image(si)) {
        __libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
        exit(EXIT_FAILURE);
    }

    add_vdso(args);

    si->CallPreInitConstructors();

    for (size_t i = 0; gLdPreloads[i] != NULL; ++i) {
        gLdPreloads[i]->CallConstructors();
    }

    /* After the link_image, the si->load_bias is initialized.
     * For so lib, the map->l_addr will be updated in notify_gdb_of_load.
     * We need to update this value for so exe here. So Unwind_Backtrace
     * for some arch like x86 could work correctly within so exe.
     */
    map->l_addr = si->load_bias;
    si->CallConstructors();

#if TIMING
    gettimeofday(&t1,NULL);
    PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) (
        (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
        (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)
        ));
#endif
#if STATS
    PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0],
          linker_stats.count[kRelocAbsolute],
          linker_stats.count[kRelocRelative],
          linker_stats.count[kRelocCopy],
          linker_stats.count[kRelocSymbol]);
#endif
#if COUNT_PAGES
    {
        // Count the pages touched during relocation via the MARK() bitmask.
        unsigned n;
        unsigned i;
        unsigned count = 0;
        for (n = 0; n < 4096; n++) {
            if (bitmask[n]) {
                unsigned x = bitmask[n];
#if defined(__LP64__)
                for (i = 0; i < 32; i++) {
#else
                for (i = 0; i < 8; i++) {
#endif
                    if (x & 1) {
                        count++;
                    }
                    x >>= 1;
                }
            }
        }
        PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4);
    }
#endif

#if TIMING || STATS || COUNT_PAGES
    fflush(stdout);
#endif

    TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry));
    return si->entry;
}

/* Compute the load-bias of an existing executable. This shall only
 * be used to compute the load bias of an executable or shared library
 * that was loaded by the kernel itself.
 *
 * Input:
 *    elf    -> address of ELF header, assumed to be at the start of the file.
 * Return:
 *    load bias, i.e. add the value of any p_vaddr in the file to get
 *    the corresponding address in memory.
 */
static Elf_Addr get_elf_exec_load_bias(const Elf_Ehdr* elf) {
  Elf_Addr offset = elf->e_phoff;
  const Elf_Phdr* phdr_table = (const Elf_Phdr*)((char*)elf + offset);
  const Elf_Phdr* phdr_end = phdr_table + elf->e_phnum;

  // The bias is the difference between where the image actually sits in
  // memory and the p_vaddr the first PT_LOAD segment was linked at.
  for (const Elf_Phdr* phdr = phdr_table; phdr < phdr_end; phdr++) {
    if (phdr->p_type == PT_LOAD) {
      return reinterpret_cast<Elf_Addr>(elf) + phdr->p_offset - phdr->p_vaddr;
    }
  }
  // No PT_LOAD segment found; treat the image as unbiased.
  return 0;
}

/*
 * This is the entry point for the linker, called from begin.S. This
 * method is responsible for fixing the linker's own relocations, and
 * then calling __linker_init_post_relocation().
 *
 * Because this method is called before the linker has fixed its own
 * relocations, any attempt to reference an extern variable, extern
 * function, or other GOT reference will generate a segfault.
 */
extern "C" Elf_Addr __linker_init(void* raw_args) {
  KernelArgumentBlock args(raw_args);

  // AT_BASE is the address the kernel mapped the linker itself at.
  Elf_Addr linker_addr = args.getauxval(AT_BASE);
  Elf_Ehdr* elf_hdr = reinterpret_cast<Elf_Ehdr*>(linker_addr);
  Elf_Phdr* phdr = (Elf_Phdr*)((unsigned char*) linker_addr + elf_hdr->e_phoff);

  // Build a stack-local soinfo describing the linker so we can run the
  // normal link path against ourselves. It must NOT go on the soinfo list.
  soinfo linker_so;
  memset(&linker_so, 0, sizeof(soinfo));

  strcpy(linker_so.name, "[dynamic linker]");
  linker_so.base = linker_addr;
  linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
  linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
  linker_so.dynamic = NULL;
  linker_so.phdr = phdr;
  linker_so.phnum = elf_hdr->e_phnum;
  // FLAG_LINKER tells soinfo_link_image to skip work that only applies to
  // ordinary libraries — presumably; verify against soinfo_link_image.
  linker_so.flags |= FLAG_LINKER;

  if (!soinfo_link_image(&linker_so)) {
    // It would be nice to print an error message, but if the linker
    // can't link itself, there's no guarantee that we'll be able to
    // call write() (because it involves a GOT reference). We may as
    // well try though...
    const char* msg = "CANNOT LINK EXECUTABLE: ";
    write(2, msg, strlen(msg));
    write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
    write(2, "\n", 1);
    _exit(EXIT_FAILURE);
  }

  // We have successfully fixed our own relocations. It's safe to run
  // the main part of the linker now.
  args.abort_message_ptr = &gAbortMessage;
  Elf_Addr start_address = __linker_init_post_relocation(args, linker_addr);

  // Linking is finished: make the soinfo pool read-only.
  set_soinfo_pool_protection(PROT_READ);

  // Return the address that the calling assembly stub should jump to.
  return start_address;
}