linker.c revision 8d0c0334f1106d36f2fd5c1cf6d5dc75a4b88850
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *  * Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 *  * Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *    the documentation and/or other materials provided with the
13 *    distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <linux/auxvec.h>
30
31#include <stdio.h>
32#include <stdlib.h>
33#include <string.h>
34#include <unistd.h>
35#include <fcntl.h>
36#include <errno.h>
37#include <dlfcn.h>
38#include <sys/stat.h>
39
40#include <pthread.h>
41
42#include <sys/mman.h>
43
44#include <sys/atomics.h>
45
46/* special private C library header - see Android.mk */
47#include <bionic_tls.h>
48
49#include "linker.h"
50#include "linker_debug.h"
51
52#include "ba.h"
53
54#define SO_MAX 96
55
56/* Assume average path length of 64 and max 8 paths */
57#define LDPATH_BUFSIZE 512
58#define LDPATH_MAX 8
59
60/* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
61 *
62 * Do NOT use malloc() and friends or pthread_*() code here.
63 * Don't use printf() either; it's caused mysterious memory
64 * corruption in the past.
65 * The linker runs before we bring up libc and it's easiest
66 * to make sure it does not depend on any complex libc features
67 *
68 * open issues / todo:
69 *
70 * - should we do anything special for STB_WEAK symbols?
71 * - are we doing everything we should for ARM_COPY relocations?
72 * - cleaner error reporting
73 * - after linking, set as much stuff as possible to READONLY
74 *   and NOEXEC
75 * - linker hardcodes PAGE_SIZE and PAGE_MASK because the kernel
76 *   headers provide versions that are negative...
77 * - allocate space for soinfo structs dynamically instead of
 *   having a hard limit (SO_MAX, currently 96)
79*/
80
81
82static int link_image(soinfo *si, unsigned wr_offset);
83
84static int socount = 0;
85static soinfo sopool[SO_MAX];
86static soinfo *freelist = NULL;
87static soinfo *solist = &libdl_info;
88static soinfo *sonext = &libdl_info;
89
90static inline int validate_soinfo(soinfo *si)
91{
92    return (si >= sopool && si < sopool + SO_MAX) ||
93        si == &libdl_info;
94}
95
96static char ldpaths_buf[LDPATH_BUFSIZE];
97static const char *ldpaths[LDPATH_MAX + 1];
98
99int debug_verbosity;
100static int pid;
101
102#if STATS
103struct _link_stats linker_stats;
104#endif
105
106#if COUNT_PAGES
107unsigned bitmask[4096];
108#endif
109
110#ifndef PT_ARM_EXIDX
111#define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
112#endif
113
114#define HOODLUM(name, ret, ...)                                               \
115    ret name __VA_ARGS__                                                      \
116    {                                                                         \
117        char errstr[] = "ERROR: " #name " called from the dynamic linker!\n"; \
118        write(2, errstr, sizeof(errstr));                                     \
119        abort();                                                              \
120    }
121HOODLUM(malloc, void *, (size_t size));
122HOODLUM(free, void, (void *ptr));
123HOODLUM(realloc, void *, (void *ptr, size_t size));
124HOODLUM(calloc, void *, (size_t cnt, size_t size));
125
126static char tmp_err_buf[768];
127static char __linker_dl_err_buf[768];
128#define DL_ERR(fmt, x...)                                                     \
129    do {                                                                      \
130        snprintf(__linker_dl_err_buf, sizeof(__linker_dl_err_buf),            \
131                 "%s[%d]: " fmt, __func__, __LINE__, ##x);                    \
132        ERROR(fmt "\n", ##x);                                                      \
133    } while(0)
134
135const char *linker_get_error(void)
136{
137    return (const char *)&__linker_dl_err_buf[0];
138}
139
140/*
141 * This function is an empty stub where GDB locates a breakpoint to get notified
142 * about linker activity.
143 */
144extern void __attribute__((noinline)) rtld_db_dlactivity(void);
145
146static struct r_debug _r_debug = {1, NULL, &rtld_db_dlactivity,
147                                  RT_CONSISTENT, 0};
148static struct link_map *r_debug_tail = 0;
149
150static pthread_mutex_t _r_debug_lock = PTHREAD_MUTEX_INITIALIZER;
151
152static void insert_soinfo_into_debug_map(soinfo * info)
153{
154    struct link_map * map;
155
156    /* Copy the necessary fields into the debug structure.
157     */
158    map = &(info->linkmap);
159    map->l_addr = info->base;
160    map->l_name = (char*) info->name;
161
162    /* Stick the new library at the end of the list.
163     * gdb tends to care more about libc than it does
164     * about leaf libraries, and ordering it this way
165     * reduces the back-and-forth over the wire.
166     */
167    if (r_debug_tail) {
168        r_debug_tail->l_next = map;
169        map->l_prev = r_debug_tail;
170        map->l_next = 0;
171    } else {
172        _r_debug.r_map = map;
173        map->l_prev = 0;
174        map->l_next = 0;
175    }
176    r_debug_tail = map;
177}
178
179static void remove_soinfo_from_debug_map(soinfo * info)
180{
181    struct link_map * map = &(info->linkmap);
182
183    if (r_debug_tail == map)
184        r_debug_tail = map->l_prev;
185
186    if (map->l_prev) map->l_prev->l_next = map->l_next;
187    if (map->l_next) map->l_next->l_prev = map->l_prev;
188}
189
/* Tell GDB that a new shared object has been loaded.
 *
 * Follows the SVR4 r_debug protocol: set r_state to RT_ADD and hit the
 * rtld_db_dlactivity() breakpoint so the debugger sees the map is in
 * flux, mutate the link map, then mark it RT_CONSISTENT and hit the
 * breakpoint again so the debugger re-reads the list. */
void notify_gdb_of_load(soinfo * info)
{
    if (info->flags & FLAG_EXE) {
        // GDB already knows about the main executable
        return;
    }

    /* Serialize updates to _r_debug and the link map. */
    pthread_mutex_lock(&_r_debug_lock);

    /* Announce that the map is about to change. */
    _r_debug.r_state = RT_ADD;
    rtld_db_dlactivity();

    insert_soinfo_into_debug_map(info);

    /* Map is stable again; let the debugger re-read it. */
    _r_debug.r_state = RT_CONSISTENT;
    rtld_db_dlactivity();

    pthread_mutex_unlock(&_r_debug_lock);
}
209
/* Tell GDB that a shared object is being unloaded.
 *
 * Mirror image of notify_gdb_of_load(): announce RT_DELETE via the
 * rtld_db_dlactivity() breakpoint, unlink the entry, then signal
 * RT_CONSISTENT so the debugger re-reads the (now smaller) map. */
void notify_gdb_of_unload(soinfo * info)
{
    if (info->flags & FLAG_EXE) {
        // GDB already knows about the main executable
        return;
    }

    /* Serialize updates to _r_debug and the link map. */
    pthread_mutex_lock(&_r_debug_lock);

    _r_debug.r_state = RT_DELETE;
    rtld_db_dlactivity();

    remove_soinfo_from_debug_map(info);

    _r_debug.r_state = RT_CONSISTENT;
    rtld_db_dlactivity();

    pthread_mutex_unlock(&_r_debug_lock);
}
229
230void notify_gdb_of_libraries()
231{
232    _r_debug.r_state = RT_ADD;
233    rtld_db_dlactivity();
234    _r_debug.r_state = RT_CONSISTENT;
235    rtld_db_dlactivity();
236}
237
238static soinfo *alloc_info(const char *name)
239{
240    soinfo *si;
241
242    if(strlen(name) >= SOINFO_NAME_LEN) {
243        DL_ERR("%5d library name %s too long", pid, name);
244        return 0;
245    }
246
247    /* The freelist is populated when we call free_info(), which in turn is
248       done only by dlclose(), which is not likely to be used.
249    */
250    if (!freelist) {
251        if(socount == SO_MAX) {
252            DL_ERR("%5d too many libraries when loading %s", pid, name);
253            return NULL;
254        }
255        freelist = sopool + socount++;
256        freelist->next = NULL;
257    }
258
259    si = freelist;
260    freelist = freelist->next;
261
262    /* Make sure we get a clean block of soinfo */
263    memset(si, 0, sizeof(soinfo));
264    strcpy((char*) si->name, name);
265    sonext->next = si;
266    si->ba_index = -1; /* by default, prelinked */
267    si->next = NULL;
268    si->refcount = 0;
269    sonext = si;
270
271    TRACE("%5d name %s: allocated soinfo @ %p\n", pid, name, si);
272    return si;
273}
274
275static void free_info(soinfo *si)
276{
277    soinfo *prev = NULL, *trav;
278
279    TRACE("%5d name %s: freeing soinfo @ %p\n", pid, si->name, si);
280
281    for(trav = solist; trav != NULL; trav = trav->next){
282        if (trav == si)
283            break;
284        prev = trav;
285    }
286    if (trav == NULL) {
287        /* si was not ni solist */
288        DL_ERR("%5d name %s is not in solist!", pid, si->name);
289        return;
290    }
291
292    /* prev will never be NULL, because the first entry in solist is
293       always the static libdl_info.
294    */
295    prev->next = si->next;
296    if (si == sonext) sonext = prev;
297    si->next = freelist;
298    freelist = si;
299}
300
301#ifndef LINKER_TEXT_BASE
302#error "linker's makefile must define LINKER_TEXT_BASE"
303#endif
304#ifndef LINKER_AREA_SIZE
305#error "linker's makefile must define LINKER_AREA_SIZE"
306#endif
307#define LINKER_BASE ((LINKER_TEXT_BASE) & 0xfff00000)
308#define LINKER_TOP  (LINKER_BASE + (LINKER_AREA_SIZE))
309
310const char *addr_to_name(unsigned addr)
311{
312    soinfo *si;
313
314    for(si = solist; si != 0; si = si->next){
315        if((addr >= si->base) && (addr < (si->base + si->size))) {
316            return si->name;
317        }
318    }
319
320    if((addr >= LINKER_BASE) && (addr < LINKER_TOP)){
321        return "linker";
322    }
323
324    return "";
325}
326
327/* For a given PC, find the .so that it belongs to.
328 * Returns the base address of the .ARM.exidx section
329 * for that .so, and the number of 8-byte entries
330 * in that section (via *pcount).
331 *
332 * Intended to be called by libc's __gnu_Unwind_Find_exidx().
333 *
334 * This function is exposed via dlfcn.c and libdl.so.
335 */
336#ifdef ANDROID_ARM_LINKER
337_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int *pcount)
338{
339    soinfo *si;
340    unsigned addr = (unsigned)pc;
341
342    if ((addr < LINKER_BASE) || (addr >= LINKER_TOP)) {
343        for (si = solist; si != 0; si = si->next){
344            if ((addr >= si->base) && (addr < (si->base + si->size))) {
345                *pcount = si->ARM_exidx_count;
346                return (_Unwind_Ptr)(si->base + (unsigned long)si->ARM_exidx);
347            }
348        }
349    }
350   *pcount = 0;
351    return NULL;
352}
353#elif defined(ANDROID_X86_LINKER)
354/* Here, we only have to provide a callback to iterate across all the
355 * loaded libraries. gcc_eh does the rest. */
356int
357dl_iterate_phdr(int (*cb)(struct dl_phdr_info *info, size_t size, void *data),
358                void *data)
359{
360    soinfo *si;
361    struct dl_phdr_info dl_info;
362    int rv = 0;
363
364    for (si = solist; si != NULL; si = si->next) {
365        dl_info.dlpi_addr = si->linkmap.l_addr;
366        dl_info.dlpi_name = si->linkmap.l_name;
367        dl_info.dlpi_phdr = si->phdr;
368        dl_info.dlpi_phnum = si->phnum;
369        rv = cb(&dl_info, sizeof (struct dl_phdr_info), data);
370        if (rv != 0)
371            break;
372    }
373    return rv;
374}
375#endif
376
377static Elf32_Sym *_elf_lookup(soinfo *si, unsigned hash, const char *name)
378{
379    Elf32_Sym *s;
380    Elf32_Sym *symtab = si->symtab;
381    const char *strtab = si->strtab;
382    unsigned n;
383
384    TRACE_TYPE(LOOKUP, "%5d SEARCH %s in %s@0x%08x %08x %d\n", pid,
385               name, si->name, si->base, hash, hash % si->nbucket);
386    n = hash % si->nbucket;
387
388    for(n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]){
389        s = symtab + n;
390        if(strcmp(strtab + s->st_name, name)) continue;
391
392            /* only concern ourselves with global symbols */
393        switch(ELF32_ST_BIND(s->st_info)){
394        case STB_GLOBAL:
395                /* no section == undefined */
396            if(s->st_shndx == 0) continue;
397
398        case STB_WEAK:
399            TRACE_TYPE(LOOKUP, "%5d FOUND %s in %s (%08x) %d\n", pid,
400                       name, si->name, s->st_value, s->st_size);
401            return s;
402        }
403    }
404
405    return 0;
406}
407
/* Standard System V ABI ELF symbol-name hash. */
static unsigned elfhash(const char *_name)
{
    const unsigned char *p = (const unsigned char *) _name;
    unsigned h = 0;

    while (*p != '\0') {
        unsigned g;

        h = (h << 4) + *p;
        p++;
        g = h & 0xf0000000;
        /* Clear the top nibble and fold it back into bits 4..7. */
        h ^= g;
        h ^= g >> 24;
    }
    return h;
}
421
422static Elf32_Sym *
423_do_lookup_in_so(soinfo *si, const char *name, unsigned *elf_hash)
424{
425    if (*elf_hash == 0)
426        *elf_hash = elfhash(name);
427    return _elf_lookup (si, *elf_hash, name);
428}
429
/* Resolve 'name' starting from 'si': search si's own symbol table
 * first, then each DT_NEEDED dependency in order.  On success, *base
 * receives the load base of the library the symbol was found in (the
 * caller needs it to relocate st_value).
 *
 * NOTE(review): the cast of d[1] below implies that, by the time this
 * runs, each DT_NEEDED entry's value slot has been patched to hold a
 * pointer to the dependency's soinfo (validate_soinfo guards against a
 * corrupt entry) -- confirm against the link step that performs the
 * patching. */
static Elf32_Sym *
_do_lookup(soinfo *si, const char *name, unsigned *base)
{
    /* 0 means "hash not computed yet"; _do_lookup_in_so fills it in
     * lazily so the hash is computed at most once. */
    unsigned elf_hash = 0;
    Elf32_Sym *s;
    unsigned *d;
    soinfo *lsi = si;

    /* Look for symbols in the local scope first (the object who is
     * searching). This happens with C++ templates on i386 for some
     * reason. */
    s = _do_lookup_in_so(si, name, &elf_hash);
    if(s != NULL)
        goto done;

    /* The dynamic section is a sequence of (tag, value) pairs,
     * terminated by a null tag. */
    for(d = si->dynamic; *d; d += 2) {
        if(d[0] == DT_NEEDED){
            lsi = (soinfo *)d[1];
            if (!validate_soinfo(lsi)) {
                DL_ERR("%5d bad DT_NEEDED pointer in %s",
                       pid, si->name);
                return 0;
            }

            DEBUG("%5d %s: looking up %s in %s\n",
                  pid, si->name, name, lsi->name);
            s = _do_lookup_in_so(lsi, name, &elf_hash);
            if(s != NULL)
                goto done;
        }
    }

done:
    if(s != NULL) {
        TRACE_TYPE(LOOKUP, "%5d si %s sym %s s->st_value = 0x%08x, "
                   "found in %s, base = 0x%08x\n",
                   pid, si->name, name, s->st_value, lsi->name, lsi->base);
        /* lsi is whichever library the symbol was actually found in. */
        *base = lsi->base;
        return s;
    }

    return 0;
}
473
474/* This is used by dl_sym().  It performs symbol lookup only within the
475   specified soinfo object and not in any of its dependencies.
476 */
477Elf32_Sym *lookup_in_library(soinfo *si, const char *name)
478{
479    unsigned unused = 0;
480    return _do_lookup_in_so(si, name, &unused);
481}
482
483/* This is used by dl_sym().  It performs a global symbol lookup.
484 */
485Elf32_Sym *lookup(const char *name, unsigned *base)
486{
487    unsigned elf_hash = 0;
488    Elf32_Sym *s = NULL;
489    soinfo *si;
490
491    for(si = solist; (s == NULL) && (si != NULL); si = si->next)
492    {
493        if(si->flags & FLAG_ERROR)
494            continue;
495        s = _do_lookup_in_so(si, name, &elf_hash);
496        if (s != NULL) {
497            *base = si->base;
498            break;
499        }
500    }
501
502    if(s != NULL) {
503        TRACE_TYPE(LOOKUP, "%5d %s s->st_value = 0x%08x, "
504                   "si->base = 0x%08x\n", pid, name, s->st_value, si->base);
505        return s;
506    }
507
508    return 0;
509}
510
#if 0
/* Debug helper (compiled out): TRACE every symbol in si's symbol
 * table.  NOTE(review): the first %08x is passed the Elf32_Sym pointer
 * 's' itself rather than one of its fields -- confirm that is the
 * intent before re-enabling this code. */
static void dump(soinfo *si)
{
    Elf32_Sym *s = si->symtab;
    unsigned n;

    for(n = 0; n < si->nchain; n++) {
        TRACE("%5d %04d> %08x: %02x %04x %08x %08x %s\n", pid, n, s,
               s->st_info, s->st_shndx, s->st_value, s->st_size,
               si->strtab + s->st_name);
        s++;
    }
}
#endif
525
526static const char *sopaths[] = {
527    "/system/lib",
528    "/lib",
529    0
530};
531
/* Open 'name' read-only, but only if it exists and is a regular file.
 * Returns the open fd on success, -1 otherwise. */
static int _open_lib(const char *name)
{
    struct stat sb;

    if (stat(name, &sb) < 0)
        return -1;
    if (!S_ISREG(sb.st_mode))
        return -1;
    /* open() itself returns -1 on failure, which we pass through. */
    return open(name, O_RDONLY);
}
544
545static int open_library(const char *name)
546{
547    int fd;
548    char buf[512];
549    const char **path;
550    int n;
551
552    TRACE("[ %5d opening %s ]\n", pid, name);
553
554    if(name == 0) return -1;
555    if(strlen(name) > 256) return -1;
556
557    if ((name[0] == '/') && ((fd = _open_lib(name)) >= 0))
558        return fd;
559
560    for (path = ldpaths; *path; path++) {
561        n = snprintf(buf, sizeof(buf), "%s/%s", *path, name);
562        if (n < 0 || n >= (int)sizeof(buf)) {
563            WARN("Ignoring very long library path: %s/%s\n", *path, name);
564            continue;
565        }
566        if ((fd = _open_lib(buf)) >= 0)
567            return fd;
568    }
569    for (path = sopaths; *path; path++) {
570        n = snprintf(buf, sizeof(buf), "%s/%s", *path, name);
571        if (n < 0 || n >= (int)sizeof(buf)) {
572            WARN("Ignoring very long library path: %s/%s\n", *path, name);
573            continue;
574        }
575        if ((fd = _open_lib(buf)) >= 0)
576            return fd;
577    }
578
579    return -1;
580}
581
582/* temporary space for holding the first page of the shared lib
583 * which contains the elf header (with the pht). */
584static unsigned char __header[PAGE_SIZE];
585
586typedef struct {
587    long mmap_addr;
588    char tag[4]; /* 'P', 'R', 'E', ' ' */
589} prelink_info_t;
590
591/* Returns the requested base address if the library is prelinked,
592 * and 0 otherwise.  */
593static unsigned long
594is_prelinked(int fd, const char *name)
595{
596    off_t sz;
597    prelink_info_t info;
598
599    sz = lseek(fd, -sizeof(prelink_info_t), SEEK_END);
600    if (sz < 0) {
601        DL_ERR("lseek() failed!");
602        return 0;
603    }
604
605    if (read(fd, &info, sizeof(info)) != sizeof(info)) {
606        WARN("Could not read prelink_info_t structure for `%s`\n", name);
607        return 0;
608    }
609
610    if (strncmp(info.tag, "PRE ", 4)) {
611        WARN("`%s` is not a prelinked library\n", name);
612        return 0;
613    }
614
615    return (unsigned long)info.mmap_addr;
616}
617
/* verify_elf_object
 *      Verifies if the object @ base is a valid ELF object by checking
 *      the four ELF magic bytes (0x7f 'E' 'L' 'F').
 *
 * Args:
 *      base: pointer to the (at least) first page of the file
 *      name: library name, currently unused here
 *
 * Returns:
 *       0 on success
 *      -1 if no valid ELF object is found @ base.
 */
static int
verify_elf_object(void *base, const char *name)
{
    const Elf32_Ehdr *hdr = (const Elf32_Ehdr *) base;

    if (hdr->e_ident[EI_MAG0] == ELFMAG0 &&
        hdr->e_ident[EI_MAG1] == ELFMAG1 &&
        hdr->e_ident[EI_MAG2] == ELFMAG2 &&
        hdr->e_ident[EI_MAG3] == ELFMAG3) {
        /* TODO: Should we verify anything else in the header? */
        return 0;
    }
    return -1;
}
641
642
643/* get_lib_extents
644 *      Retrieves the base (*base) address where the ELF object should be
645 *      mapped and its overall memory size (*total_sz).
646 *
647 * Args:
648 *      fd: Opened file descriptor for the library
649 *      name: The name of the library
650 *      _hdr: Pointer to the header page of the library
651 *      total_sz: Total size of the memory that should be allocated for
652 *                this library
653 *
654 * Returns:
655 *      -1 if there was an error while trying to get the lib extents.
656 *         The possible reasons are:
657 *             - Could not determine if the library was prelinked.
658 *             - The library provided is not a valid ELF object
659 *       0 if the library did not request a specific base offset (normal
660 *         for non-prelinked libs)
661 *     > 0 if the library requests a specific address to be mapped to.
662 *         This indicates a pre-linked library.
663 */
664static unsigned
665get_lib_extents(int fd, const char *name, void *__hdr, unsigned *total_sz)
666{
667    unsigned req_base;
668    unsigned min_vaddr = 0xffffffff;
669    unsigned max_vaddr = 0;
670    unsigned char *_hdr = (unsigned char *)__hdr;
671    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)_hdr;
672    Elf32_Phdr *phdr;
673    int cnt;
674
675    TRACE("[ %5d Computing extents for '%s'. ]\n", pid, name);
676    if (verify_elf_object(_hdr, name) < 0) {
677        DL_ERR("%5d - %s is not a valid ELF object", pid, name);
678        return (unsigned)-1;
679    }
680
681    req_base = (unsigned) is_prelinked(fd, name);
682    if (req_base == (unsigned)-1)
683        return -1;
684    else if (req_base != 0) {
685        TRACE("[ %5d - Prelinked library '%s' requesting base @ 0x%08x ]\n",
686              pid, name, req_base);
687    } else {
688        TRACE("[ %5d - Non-prelinked library '%s' found. ]\n", pid, name);
689    }
690
691    phdr = (Elf32_Phdr *)(_hdr + ehdr->e_phoff);
692
693    /* find the min/max p_vaddrs from all the PT_LOAD segments so we can
694     * get the range. */
695    for (cnt = 0; cnt < ehdr->e_phnum; ++cnt, ++phdr) {
696        if (phdr->p_type == PT_LOAD) {
697            if ((phdr->p_vaddr + phdr->p_memsz) > max_vaddr)
698                max_vaddr = phdr->p_vaddr + phdr->p_memsz;
699            if (phdr->p_vaddr < min_vaddr)
700                min_vaddr = phdr->p_vaddr;
701        }
702    }
703
704    if ((min_vaddr == 0xffffffff) && (max_vaddr == 0)) {
705        DL_ERR("%5d - No loadable segments found in %s.", pid, name);
706        return (unsigned)-1;
707    }
708
709    /* truncate min_vaddr down to page boundary */
710    min_vaddr &= ~PAGE_MASK;
711
712    /* round max_vaddr up to the next page */
713    max_vaddr = (max_vaddr + PAGE_SIZE - 1) & ~PAGE_MASK;
714
715    *total_sz = (max_vaddr - min_vaddr);
716    return (unsigned)req_base;
717}
718
719/* alloc_mem_region
720 *
721 *     This function reserves a chunk of memory to be used for mapping in
722 *     the shared library. We reserve the entire memory region here, and
723 *     then the rest of the linker will relocate the individual loadable
724 *     segments into the correct locations within this memory range.
725 *
726 * Args:
727 *     si->base: The requested base of the allocation. If 0, a sane one will be
728 *               chosen in the range LIBBASE <= base < LIBLAST.
729 *     si->size: The size of the allocation.
730 *
731 * Returns:
732 *     -1 on failure, and 0 on success.  On success, si->base will contain
733 *     the virtual address at which the library will be mapped.
734 */
735
/* Reserve the library's entire address range with a single anonymous
 * mapping at si->base (si->size bytes); load_segments() later maps the
 * real file contents over pieces of this region.
 *
 * Returns 0 on success, -1 (with the reason logged via DL_ERR) if mmap
 * fails or if the kernel placed the mapping somewhere other than the
 * exact requested base.  The mmap call deliberately omits MAP_FIXED,
 * so the address is only a hint and must be verified afterwards. */
static int reserve_mem_region(soinfo *si)
{
    void *base = mmap((void *)si->base, si->size, PROT_READ | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        DL_ERR("%5d can NOT map (%sprelinked) library '%s' at 0x%08x "
              "as requested, will try general pool: %d (%s)",
              pid, (si->base ? "" : "non-"), si->name, si->base,
              errno, strerror(errno));
        return -1;
    } else if (base != (void *)si->base) {
        /* The kernel ignored our address hint; undo the stray mapping
         * so we do not leak it. */
        DL_ERR("OOPS: %5d %sprelinked library '%s' mapped at 0x%08x, "
              "not at 0x%08x", pid, (si->base ? "" : "non-"),
              si->name, (unsigned)base, si->base);
        munmap(base, si->size);
        return -1;
    }
    return 0;
}
755
756static int
757alloc_mem_region(soinfo *si)
758{
759    if (si->base) {
760        /* Attempt to mmap a prelinked library. */
761        si->ba_index = -1;
762        return reserve_mem_region(si);
763    }
764
765    /* This is not a prelinked library, so we attempt to allocate space
766       for it from the buddy allocator, which manages the area between
767       LIBBASE and LIBLAST.
768    */
769    si->ba_index = ba_allocate(si->size);
770    if(si->ba_index >= 0) {
771        si->base = ba_start_addr(si->ba_index);
772        PRINT("%5d mapping library '%s' at %08x (index %d) " \
773              "through buddy allocator.\n",
774              pid, si->name, si->base, si->ba_index);
775        if (reserve_mem_region(si) < 0) {
776            ba_free(si->ba_index);
777            si->ba_index = -1;
778            si->base = 0;
779            goto err;
780        }
781        return 0;
782    }
783
784err:
785    DL_ERR("OOPS: %5d cannot map library '%s'. no vspace available.",
786          pid, si->name);
787    return -1;
788}
789
790#define MAYBE_MAP_FLAG(x,from,to)    (((x) & (from)) ? (to) : 0)
791#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
792                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
793                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
794/* load_segments
795 *
796 *     This function loads all the loadable (PT_LOAD) segments into memory
797 *     at their appropriate memory offsets off the base address.
798 *
799 * Args:
800 *     fd: Open file descriptor to the library to load.
801 *     header: Pointer to a header page that contains the ELF header.
802 *             This is needed since we haven't mapped in the real file yet.
803 *     si: ptr to soinfo struct describing the shared object.
804 *
805 * Returns:
806 *     0 on success, -1 on failure.
807 */
808static int
809load_segments(int fd, void *header, soinfo *si)
810{
811    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)header;
812    Elf32_Phdr *phdr = (Elf32_Phdr *)((unsigned char *)header + ehdr->e_phoff);
813    unsigned char *base = (unsigned char *)si->base;
814    int cnt;
815    unsigned len;
816    unsigned char *tmp;
817    unsigned char *pbase;
818    unsigned char *extra_base;
819    unsigned extra_len;
820    unsigned total_sz = 0;
821
822    si->wrprotect_start = 0xffffffff;
823    si->wrprotect_end = 0;
824
825    TRACE("[ %5d - Begin loading segments for '%s' @ 0x%08x ]\n",
826          pid, si->name, (unsigned)si->base);
827    /* Now go through all the PT_LOAD segments and map them into memory
828     * at the appropriate locations. */
829    for (cnt = 0; cnt < ehdr->e_phnum; ++cnt, ++phdr) {
830        if (phdr->p_type == PT_LOAD) {
831            DEBUG_DUMP_PHDR(phdr, "PT_LOAD", pid);
832            /* we want to map in the segment on a page boundary */
833            tmp = base + (phdr->p_vaddr & (~PAGE_MASK));
834            /* add the # of bytes we masked off above to the total length. */
835            len = phdr->p_filesz + (phdr->p_vaddr & PAGE_MASK);
836
837            TRACE("[ %d - Trying to load segment from '%s' @ 0x%08x "
838                  "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x ]\n", pid, si->name,
839                  (unsigned)tmp, len, phdr->p_vaddr, phdr->p_offset);
840            pbase = mmap(tmp, len, PFLAGS_TO_PROT(phdr->p_flags),
841                         MAP_PRIVATE | MAP_FIXED, fd,
842                         phdr->p_offset & (~PAGE_MASK));
843            if (pbase == MAP_FAILED) {
844                DL_ERR("%d failed to map segment from '%s' @ 0x%08x (0x%08x). "
845                      "p_vaddr=0x%08x p_offset=0x%08x", pid, si->name,
846                      (unsigned)tmp, len, phdr->p_vaddr, phdr->p_offset);
847                goto fail;
848            }
849
850            /* If 'len' didn't end on page boundary, and it's a writable
851             * segment, zero-fill the rest. */
852            if ((len & PAGE_MASK) && (phdr->p_flags & PF_W))
853                memset((void *)(pbase + len), 0, PAGE_SIZE - (len & PAGE_MASK));
854
855            /* Check to see if we need to extend the map for this segment to
856             * cover the diff between filesz and memsz (i.e. for bss).
857             *
858             *  base           _+---------------------+  page boundary
859             *                  .                     .
860             *                  |                     |
861             *                  .                     .
862             *  pbase          _+---------------------+  page boundary
863             *                  |                     |
864             *                  .                     .
865             *  base + p_vaddr _|                     |
866             *                  . \          \        .
867             *                  . | filesz   |        .
868             *  pbase + len    _| /          |        |
869             *     <0 pad>      .            .        .
870             *  extra_base     _+------------|--------+  page boundary
871             *               /  .            .        .
872             *               |  .            .        .
873             *               |  +------------|--------+  page boundary
874             *  extra_len->  |  |            |        |
875             *               |  .            | memsz  .
876             *               |  .            |        .
877             *               \ _|            /        |
878             *                  .                     .
879             *                  |                     |
880             *                 _+---------------------+  page boundary
881             */
882            tmp = (unsigned char *)(((unsigned)pbase + len + PAGE_SIZE - 1) &
883                                    (~PAGE_MASK));
884            if (tmp < (base + phdr->p_vaddr + phdr->p_memsz)) {
885                extra_len = base + phdr->p_vaddr + phdr->p_memsz - tmp;
886                TRACE("[ %5d - Need to extend segment from '%s' @ 0x%08x "
887                      "(0x%08x) ]\n", pid, si->name, (unsigned)tmp, extra_len);
888                /* map in the extra page(s) as anonymous into the range.
889                 * This is probably not necessary as we already mapped in
890                 * the entire region previously, but we just want to be
891                 * sure. This will also set the right flags on the region
892                 * (though we can probably accomplish the same thing with
893                 * mprotect).
894                 */
895                extra_base = mmap((void *)tmp, extra_len,
896                                  PFLAGS_TO_PROT(phdr->p_flags),
897                                  MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
898                                  -1, 0);
899                if (extra_base == MAP_FAILED) {
900                    DL_ERR("[ %5d - failed to extend segment from '%s' @ 0x%08x"
901                           " (0x%08x) ]", pid, si->name, (unsigned)tmp,
902                          extra_len);
903                    goto fail;
904                }
905                /* TODO: Check if we need to memset-0 this region.
906                 * Anonymous mappings are zero-filled copy-on-writes, so we
907                 * shouldn't need to. */
908                TRACE("[ %5d - Segment from '%s' extended @ 0x%08x "
909                      "(0x%08x)\n", pid, si->name, (unsigned)extra_base,
910                      extra_len);
911            }
912            /* set the len here to show the full extent of the segment we
913             * just loaded, mostly for debugging */
914            len = (((unsigned)base + phdr->p_vaddr + phdr->p_memsz +
915                    PAGE_SIZE - 1) & (~PAGE_MASK)) - (unsigned)pbase;
916            TRACE("[ %5d - Successfully loaded segment from '%s' @ 0x%08x "
917                  "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x\n", pid, si->name,
918                  (unsigned)pbase, len, phdr->p_vaddr, phdr->p_offset);
919            total_sz += len;
920            /* Make the section writable just in case we'll have to write to
921             * it during relocation (i.e. text segment). However, we will
922             * remember what range of addresses should be write protected.
923             *
924             */
925            if (!(phdr->p_flags & PF_W)) {
926                if ((unsigned)pbase < si->wrprotect_start)
927                    si->wrprotect_start = (unsigned)pbase;
928                if (((unsigned)pbase + len) > si->wrprotect_end)
929                    si->wrprotect_end = (unsigned)pbase + len;
930                mprotect(pbase, len,
931                         PFLAGS_TO_PROT(phdr->p_flags) | PROT_WRITE);
932            }
933        } else if (phdr->p_type == PT_DYNAMIC) {
934            DEBUG_DUMP_PHDR(phdr, "PT_DYNAMIC", pid);
935            /* this segment contains the dynamic linking information */
936            si->dynamic = (unsigned *)(base + phdr->p_vaddr);
937        } else {
938#ifdef ANDROID_ARM_LINKER
939            if (phdr->p_type == PT_ARM_EXIDX) {
940                DEBUG_DUMP_PHDR(phdr, "PT_ARM_EXIDX", pid);
941                /* exidx entries (used for stack unwinding) are 8 bytes each.
942                 */
943                si->ARM_exidx = (unsigned *)phdr->p_vaddr;
944                si->ARM_exidx_count = phdr->p_memsz / 8;
945            }
946#endif
947        }
948
949    }
950
951    /* Sanity check */
952    if (total_sz > si->size) {
953        DL_ERR("%5d - Total length (0x%08x) of mapped segments from '%s' is "
954              "greater than what was allocated (0x%08x). THIS IS BAD!",
955              pid, total_sz, si->name, si->size);
956        goto fail;
957    }
958
959    TRACE("[ %5d - Finish loading segments for '%s' @ 0x%08x. "
960          "Total memory footprint: 0x%08x bytes ]\n", pid, si->name,
961          (unsigned)si->base, si->size);
962    return 0;
963
964fail:
965    /* We can just blindly unmap the entire region even though some things
966     * were mapped in originally with anonymous and others could have been
967     * been mapped in from the file before we failed. The kernel will unmap
968     * all the pages in the range, irrespective of how they got there.
969     */
970    munmap((void *)si->base, si->size);
971    si->flags |= FLAG_ERROR;
972    return -1;
973}
974
975/* TODO: Implement this to take care of the fact that Android ARM
976 * ELF objects shove everything into a single loadable segment that has the
977 * write bit set. wr_offset is then used to set non-(data|bss) pages to be
978 * non-writable.
979 */
#if 0
/* Scan the section headers of the ELF object in 'fd' and return the lowest
 * sh_addr of any writable section, i.e. the first address that must remain
 * writable; everything below it could be write-protected.  Returns
 * (unsigned)-1 if the section headers cannot be mapped.
 *
 * Dead code, kept for reference (see the #if 0 block in link_image).
 *
 * NOTE(review): the mmap offset is rounded down to a page boundary
 * (e_shoff & ~PAGE_MASK) but shdr_start is then used directly as the first
 * Elf32_Shdr without adding back the in-page offset (e_shoff & PAGE_MASK),
 * so the scan would read the wrong bytes whenever e_shoff is not
 * page-aligned — confirm before ever re-enabling this. */
static unsigned
get_wr_offset(int fd, const char *name, Elf32_Ehdr *ehdr)
{
    Elf32_Shdr *shdr_start;
    Elf32_Shdr *shdr;
    int shdr_sz = ehdr->e_shnum * sizeof(Elf32_Shdr);
    int cnt;
    unsigned wr_offset = 0xffffffff;

    shdr_start = mmap(0, shdr_sz, PROT_READ, MAP_PRIVATE, fd,
                      ehdr->e_shoff & (~PAGE_MASK));
    if (shdr_start == MAP_FAILED) {
        WARN("%5d - Could not read section header info from '%s'. Will not "
             "not be able to determine write-protect offset.\n", pid, name);
        return (unsigned)-1;
    }

    /* Track the minimum sh_addr over all writable, non-null sections. */
    for(cnt = 0, shdr = shdr_start; cnt < ehdr->e_shnum; ++cnt, ++shdr) {
        if ((shdr->sh_type != SHT_NULL) && (shdr->sh_flags & SHF_WRITE) &&
            (shdr->sh_addr < wr_offset)) {
            wr_offset = shdr->sh_addr;
        }
    }

    munmap(shdr_start, shdr_sz);
    return wr_offset;
}
#endif
1009
1010static soinfo *
1011load_library(const char *name)
1012{
1013    int fd = open_library(name);
1014    int cnt;
1015    unsigned ext_sz;
1016    unsigned req_base;
1017    const char *bname;
1018    soinfo *si = NULL;
1019    Elf32_Ehdr *hdr;
1020
1021    if(fd == -1) {
1022        DL_ERR("Library '%s' not found", name);
1023        return NULL;
1024    }
1025
1026    /* We have to read the ELF header to figure out what to do with this image
1027     */
1028    if (lseek(fd, 0, SEEK_SET) < 0) {
1029        DL_ERR("lseek() failed!");
1030        goto fail;
1031    }
1032
1033    if ((cnt = read(fd, &__header[0], PAGE_SIZE)) < 0) {
1034        DL_ERR("read() failed!");
1035        goto fail;
1036    }
1037
1038    /* Parse the ELF header and get the size of the memory footprint for
1039     * the library */
1040    req_base = get_lib_extents(fd, name, &__header[0], &ext_sz);
1041    if (req_base == (unsigned)-1)
1042        goto fail;
1043    TRACE("[ %5d - '%s' (%s) wants base=0x%08x sz=0x%08x ]\n", pid, name,
1044          (req_base ? "prelinked" : "not pre-linked"), req_base, ext_sz);
1045
1046    /* Now configure the soinfo struct where we'll store all of our data
1047     * for the ELF object. If the loading fails, we waste the entry, but
1048     * same thing would happen if we failed during linking. Configuring the
1049     * soinfo struct here is a lot more convenient.
1050     */
1051    bname = strrchr(name, '/');
1052    si = alloc_info(bname ? bname + 1 : name);
1053    if (si == NULL)
1054        goto fail;
1055
1056    /* Carve out a chunk of memory where we will map in the individual
1057     * segments */
1058    si->base = req_base;
1059    si->size = ext_sz;
1060    si->flags = 0;
1061    si->entry = 0;
1062    si->dynamic = (unsigned *)-1;
1063    if (alloc_mem_region(si) < 0)
1064        goto fail;
1065
1066    TRACE("[ %5d allocated memory for %s @ %p (0x%08x) ]\n",
1067          pid, name, (void *)si->base, (unsigned) ext_sz);
1068
1069    /* Now actually load the library's segments into right places in memory */
1070    if (load_segments(fd, &__header[0], si) < 0) {
1071        if (si->ba_index >= 0) {
1072            ba_free(si->ba_index);
1073            si->ba_index = -1;
1074        }
1075        goto fail;
1076    }
1077
1078    /* this might not be right. Technically, we don't even need this info
1079     * once we go through 'load_segments'. */
1080    hdr = (Elf32_Ehdr *)si->base;
1081    si->phdr = (Elf32_Phdr *)((unsigned char *)si->base + hdr->e_phoff);
1082    si->phnum = hdr->e_phnum;
1083    /**/
1084
1085    close(fd);
1086    return si;
1087
1088fail:
1089    if (si) free_info(si);
1090    close(fd);
1091    return NULL;
1092}
1093
1094static soinfo *
1095init_library(soinfo *si)
1096{
1097    unsigned wr_offset = 0xffffffff;
1098
1099    /* At this point we know that whatever is loaded @ base is a valid ELF
1100     * shared library whose segments are properly mapped in. */
1101    TRACE("[ %5d init_library base=0x%08x sz=0x%08x name='%s') ]\n",
1102          pid, si->base, si->size, si->name);
1103
1104    if (si->base < LIBBASE || si->base >= LIBLAST)
1105        si->flags |= FLAG_PRELINKED;
1106
1107    if(link_image(si, wr_offset)) {
1108            /* We failed to link.  However, we can only restore libbase
1109            ** if no additional libraries have moved it since we updated it.
1110            */
1111        munmap((void *)si->base, si->size);
1112        return NULL;
1113    }
1114
1115    return si;
1116}
1117
1118soinfo *find_library(const char *name)
1119{
1120    soinfo *si;
1121    const char *bname = strrchr(name, '/');
1122    bname = bname ? bname + 1 : name;
1123
1124    for(si = solist; si != 0; si = si->next){
1125        if(!strcmp(bname, si->name)) {
1126            if(si->flags & FLAG_ERROR) {
1127                DL_ERR("%5d '%s' failed to load previously", pid, bname);
1128                return NULL;
1129            }
1130            if(si->flags & FLAG_LINKED) return si;
1131            DL_ERR("OOPS: %5d recursive link to '%s'", pid, si->name);
1132            return NULL;
1133        }
1134    }
1135
1136    TRACE("[ %5d '%s' has not been loaded yet.  Locating...]\n", pid, name);
1137    si = load_library(name);
1138    if(si == NULL)
1139        return NULL;
1140    return init_library(si);
1141}
1142
1143/* TODO:
1144 *   notify gdb of unload
1145 *   for non-prelinked libraries, find a way to decrement libbase
1146 */
/* Forward declaration: destructors must run before we unmap below. */
static void call_destructors(soinfo *si);
/* Drop one reference to 'si'.  When the last reference goes away, run its
 * destructors, recursively unload its DT_NEEDED dependencies, unmap its
 * segments, and release its soinfo entry.  Returns the remaining refcount
 * (0 when the library was actually unloaded).
 *
 * NOTE: the teardown order matters — destructors and the DT_NEEDED walk
 * both read memory that munmap() below destroys. */
unsigned unload_library(soinfo *si)
{
    unsigned *d;
    if (si->refcount == 1) {
        TRACE("%5d unloading '%s'\n", pid, si->name);
        call_destructors(si);

        /* Walk the dynamic section (tag/value pairs, terminated by a zero
         * tag).  link_image() replaced each DT_NEEDED value with the
         * dependency's soinfo pointer; recover it, clear the slot, and
         * recursively drop our reference. */
        for(d = si->dynamic; *d; d += 2) {
            if(d[0] == DT_NEEDED){
                soinfo *lsi = (soinfo *)d[1];
                d[1] = 0;
                if (validate_soinfo(lsi)) {
                    TRACE("%5d %s needs to unload %s\n", pid,
                          si->name, lsi->name);
                    unload_library(lsi);
                }
                else
                    DL_ERR("%5d %s: could not unload dependent library",
                           pid, si->name);
            }
        }

        munmap((char *)si->base, si->size);
        if (si->ba_index >= 0) {
            PRINT("%5d releasing library '%s' address space at %08x "\
                  "through buddy allocator.\n",
                  pid, si->name, si->base);
            ba_free(si->ba_index);
        }
        notify_gdb_of_unload(si);
        free_info(si);
        /* NOTE(review): si is still written/read after free_info(); this
         * presumably relies on soinfo entries living in a static pool
         * rather than the heap — confirm against alloc_info/free_info. */
        si->refcount = 0;
    }
    else {
        si->refcount--;
        PRINT("%5d not unloading '%s', decrementing refcount to %d\n",
              pid, si->name, si->refcount);
    }
    return si->refcount;
}
1188
1189/* TODO: don't use unsigned for addrs below. It works, but is not
1190 * ideal. They should probably be either uint32_t, Elf32_Addr, or unsigned
1191 * long.
1192 */
/* Apply 'count' Elf32_Rel relocations starting at 'rel' to the image 'si'.
 * For each entry: resolve the referenced symbol (if any) through
 * _do_lookup(), then patch the target word according to the relocation
 * type for the architecture this linker was built for.
 * Returns 0 on success, -1 on any unresolved symbol or unknown type. */
static int reloc_library(soinfo *si, Elf32_Rel *rel, unsigned count)
{
    Elf32_Sym *symtab = si->symtab;
    const char *strtab = si->strtab;
    Elf32_Sym *s;
    unsigned base;
    Elf32_Rel *start = rel;
    unsigned idx;

    for (idx = 0; idx < count; ++idx) {
        unsigned type = ELF32_R_TYPE(rel->r_info);
        unsigned sym = ELF32_R_SYM(rel->r_info);
        /* Address of the word to patch, adjusted by the load base. */
        unsigned reloc = (unsigned)(rel->r_offset + si->base);
        unsigned sym_addr = 0;
        char *sym_name = NULL;

        DEBUG("%5d Processing '%s' relocation at index %d\n", pid,
              si->name, idx);
        if(sym != 0) {
            /* Symbolic relocation: resolve the name through the lookup
             * chain; 'base' receives the load base of the defining object. */
            sym_name = (char *)(strtab + symtab[sym].st_name);
            s = _do_lookup(si, sym_name, &base);
            if(s == 0) {
                /* NOTE(review): undefined *weak* symbols also land here
                 * and fail hard — confirm whether that is intended. */
                DL_ERR("%5d cannot locate '%s'...", pid, sym_name);
                return -1;
            }
#if 0
            if((base == 0) && (si->base != 0)){
                    /* linking from libraries to main image is bad */
                DL_ERR("%5d cannot locate '%s'...",
                       pid, strtab + symtab[sym].st_name);
                return -1;
            }
#endif
            if ((s->st_shndx == SHN_UNDEF) && (s->st_value != 0)) {
                DL_ERR("%5d In '%s', shndx=%d && value=0x%08x. We do not "
                      "handle this yet", pid, si->name, s->st_shndx,
                      s->st_value);
                return -1;
            }
            sym_addr = (unsigned)(s->st_value + base);
            COUNT_RELOC(RELOC_SYMBOL);
        } else {
            s = 0;
        }

/* TODO: This is ugly. Split up the relocations by arch into
 * different files.
 */
        switch(type){
#if defined(ANDROID_ARM_LINKER)
        case R_ARM_JUMP_SLOT:
            COUNT_RELOC(RELOC_ABSOLUTE);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid,
                       reloc, sym_addr, sym_name);
            *((unsigned*)reloc) = sym_addr;
            break;
        case R_ARM_GLOB_DAT:
            COUNT_RELOC(RELOC_ABSOLUTE);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid,
                       reloc, sym_addr, sym_name);
            *((unsigned*)reloc) = sym_addr;
            break;
        case R_ARM_ABS32:
            COUNT_RELOC(RELOC_ABSOLUTE);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "%5d RELO ABS %08x <- %08x %s\n", pid,
                       reloc, sym_addr, sym_name);
            /* ABS32 adds to the addend already stored at the target. */
            *((unsigned*)reloc) += sym_addr;
            break;
#elif defined(ANDROID_X86_LINKER)
        case R_386_JUMP_SLOT:
            COUNT_RELOC(RELOC_ABSOLUTE);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid,
                       reloc, sym_addr, sym_name);
            *((unsigned*)reloc) = sym_addr;
            break;
        case R_386_GLOB_DAT:
            COUNT_RELOC(RELOC_ABSOLUTE);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid,
                       reloc, sym_addr, sym_name);
            *((unsigned*)reloc) = sym_addr;
            break;
#endif /* ANDROID_*_LINKER */

#if defined(ANDROID_ARM_LINKER)
        case R_ARM_RELATIVE:
#elif defined(ANDROID_X86_LINKER)
        case R_386_RELATIVE:
#endif /* ANDROID_*_LINKER */
            COUNT_RELOC(RELOC_RELATIVE);
            MARK(rel->r_offset);
            /* RELATIVE relocations must not reference a symbol. */
            if(sym){
                DL_ERR("%5d odd RELATIVE form...", pid);
                return -1;
            }
            TRACE_TYPE(RELO, "%5d RELO RELATIVE %08x <- +%08x\n", pid,
                       reloc, si->base);
            *((unsigned*)reloc) += si->base;
            break;

#if defined(ANDROID_X86_LINKER)
        case R_386_32:
            COUNT_RELOC(RELOC_RELATIVE);
            MARK(rel->r_offset);

            TRACE_TYPE(RELO, "%5d RELO R_386_32 %08x <- +%08x %s\n", pid,
                       reloc, sym_addr, sym_name);
            *((unsigned *)reloc) += (unsigned)sym_addr;
            break;

        case R_386_PC32:
            COUNT_RELOC(RELOC_RELATIVE);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "%5d RELO R_386_PC32 %08x <- "
                       "+%08x (%08x - %08x) %s\n", pid, reloc,
                       (sym_addr - reloc), sym_addr, reloc, sym_name);
            /* PC-relative: patch with the displacement from the target. */
            *((unsigned *)reloc) += (unsigned)(sym_addr - reloc);
            break;
#endif /* ANDROID_X86_LINKER */

#ifdef ANDROID_ARM_LINKER
        case R_ARM_COPY:
            COUNT_RELOC(RELOC_COPY);
            MARK(rel->r_offset);
            TRACE_TYPE(RELO, "%5d RELO %08x <- %d @ %08x %s\n", pid,
                       reloc, s->st_size, sym_addr, sym_name);
            /* Copy the symbol's data into the executable's own storage. */
            memcpy((void*)reloc, (void*)sym_addr, s->st_size);
            break;
        case R_ARM_NONE:
            break;
#endif /* ANDROID_ARM_LINKER */

        default:
            DL_ERR("%5d unknown reloc type %d @ %p (%d)",
                  pid, type, rel, (int) (rel - start));
            return -1;
        }
        rel++;
    }
    return 0;
}
1338
1339
1340/* Please read the "Initialization and Termination functions" functions.
1341 * of the linker design note in bionic/linker/README.TXT to understand
1342 * what the following code is doing.
1343 *
1344 * The important things to remember are:
1345 *
1346 *   DT_PREINIT_ARRAY must be called first for executables, and should
1347 *   not appear in shared libraries.
1348 *
1349 *   DT_INIT should be called before DT_INIT_ARRAY if both are present
1350 *
1351 *   DT_FINI should be called after DT_FINI_ARRAY if both are present
1352 *
1353 *   DT_FINI_ARRAY must be parsed in reverse order.
1354 */
1355
1356static void call_array(unsigned *ctor, int count, int reverse)
1357{
1358    int n, inc = 1;
1359
1360    if (reverse) {
1361        ctor += (count-1);
1362        inc   = -1;
1363    }
1364
1365    for(n = count; n > 0; n--) {
1366        TRACE("[ %5d Looking at %s *0x%08x == 0x%08x ]\n", pid,
1367              reverse ? "dtor" : "ctor",
1368              (unsigned)ctor, (unsigned)*ctor);
1369        void (*func)() = (void (*)()) *ctor;
1370        ctor += inc;
1371        if(((int) func == 0) || ((int) func == -1)) continue;
1372        TRACE("[ %5d Calling func @ 0x%08x ]\n", pid, (unsigned)func);
1373        func();
1374    }
1375}
1376
1377static void call_constructors(soinfo *si)
1378{
1379    if (si->flags & FLAG_EXE) {
1380        TRACE("[ %5d Calling preinit_array @ 0x%08x [%d] for '%s' ]\n",
1381              pid, (unsigned)si->preinit_array, si->preinit_array_count,
1382              si->name);
1383        call_array(si->preinit_array, si->preinit_array_count, 0);
1384        TRACE("[ %5d Done calling preinit_array for '%s' ]\n", pid, si->name);
1385    } else {
1386        if (si->preinit_array) {
1387            DL_ERR("%5d Shared library '%s' has a preinit_array table @ 0x%08x."
1388                   " This is INVALID.", pid, si->name,
1389                   (unsigned)si->preinit_array);
1390        }
1391    }
1392
1393    if (si->init_func) {
1394        TRACE("[ %5d Calling init_func @ 0x%08x for '%s' ]\n", pid,
1395              (unsigned)si->init_func, si->name);
1396        si->init_func();
1397        TRACE("[ %5d Done calling init_func for '%s' ]\n", pid, si->name);
1398    }
1399
1400    if (si->init_array) {
1401        TRACE("[ %5d Calling init_array @ 0x%08x [%d] for '%s' ]\n", pid,
1402              (unsigned)si->init_array, si->init_array_count, si->name);
1403        call_array(si->init_array, si->init_array_count, 0);
1404        TRACE("[ %5d Done calling init_array for '%s' ]\n", pid, si->name);
1405    }
1406}
1407
1408
1409static void call_destructors(soinfo *si)
1410{
1411    if (si->fini_array) {
1412        TRACE("[ %5d Calling fini_array @ 0x%08x [%d] for '%s' ]\n", pid,
1413              (unsigned)si->fini_array, si->fini_array_count, si->name);
1414        call_array(si->fini_array, si->fini_array_count, 1);
1415        TRACE("[ %5d Done calling fini_array for '%s' ]\n", pid, si->name);
1416    }
1417
1418    if (si->fini_func) {
1419        TRACE("[ %5d Calling fini_func @ 0x%08x for '%s' ]\n", pid,
1420              (unsigned)si->fini_func, si->name);
1421        si->fini_func();
1422        TRACE("[ %5d Done calling fini_func for '%s' ]\n", pid, si->name);
1423    }
1424}
1425
1426/* Force any of the closed stdin, stdout and stderr to be associated with
1427   /dev/null. */
1428static int nullify_closed_stdio (void)
1429{
1430    int dev_null, i, status;
1431    int return_value = 0;
1432
1433    dev_null = open("/dev/null", O_RDWR);
1434    if (dev_null < 0) {
1435        DL_ERR("Cannot open /dev/null.");
1436        return -1;
1437    }
1438    TRACE("[ %5d Opened /dev/null file-descriptor=%d]\n", pid, dev_null);
1439
1440    /* If any of the stdio file descriptors is valid and not associated
1441       with /dev/null, dup /dev/null to it.  */
1442    for (i = 0; i < 3; i++) {
1443        /* If it is /dev/null already, we are done. */
1444        if (i == dev_null)
1445            continue;
1446
1447        TRACE("[ %5d Nullifying stdio file descriptor %d]\n", pid, i);
1448        /* The man page of fcntl does not say that fcntl(..,F_GETFL)
1449           can be interrupted but we do this just to be safe. */
1450        do {
1451          status = fcntl(i, F_GETFL);
1452        } while (status < 0 && errno == EINTR);
1453
1454        /* If file is openned, we are good. */
1455        if (status >= 0)
1456          continue;
1457
1458        /* The only error we allow is that the file descriptor does not
1459           exist, in which case we dup /dev/null to it. */
1460        if (errno != EBADF) {
1461            DL_ERR("nullify_stdio: unhandled error %s", strerror(errno));
1462            return_value = -1;
1463            continue;
1464        }
1465
1466        /* Try dupping /dev/null to this stdio file descriptor and
1467           repeat if there is a signal.  Note that any errors in closing
1468           the stdio descriptor are lost.  */
1469        do {
1470            status = dup2(dev_null, i);
1471        } while (status < 0 && errno == EINTR);
1472
1473        if (status < 0) {
1474            DL_ERR("nullify_stdio: dup2 error %s", strerror(errno));
1475            return_value = -1;
1476            continue;
1477        }
1478    }
1479
1480    /* If /dev/null is not one of the stdio file descriptors, close it. */
1481    if (dev_null > 2) {
1482        TRACE("[ %5d Closing /dev/null file-descriptor=%d]\n", pid, dev_null);
1483        do {
1484            status = close(dev_null);
1485        } while (status < 0 && errno == EINTR);
1486
1487        if (status < 0) {
1488            DL_ERR("nullify_stdio: close error %s", strerror(errno));
1489            return_value = -1;
1490        }
1491    }
1492
1493    return return_value;
1494}
1495
/* Link an already-mapped image: locate its dynamic section, parse the
 * dynamic entries, load all DT_NEEDED dependencies, apply PLT and data
 * relocations, restore write-protection on read-only segments, and run
 * constructors.  Returns 0 on success; on failure sets FLAG_ERROR on
 * 'si' and returns -1.
 *
 * 'wr_offset' is only consumed by the #if 0 legacy protection code near
 * the bottom and is otherwise unused. */
static int link_image(soinfo *si, unsigned wr_offset)
{
    unsigned *d;
    Elf32_Phdr *phdr = si->phdr;
    int phnum = si->phnum;

    INFO("[ %5d linking %s ]\n", pid, si->name);
    DEBUG("%5d si->base = 0x%08x si->flags = 0x%08x\n", pid,
          si->base, si->flags);

    if (si->flags & FLAG_EXE) {
        /* Locate the needed program segments (DYNAMIC/ARM_EXIDX) for
         * linkage info if this is the executable. If this was a
         * dynamic lib, that would have been done at load time.
         *
         * TODO: It's unfortunate that small pieces of this are
         * repeated from the load_library routine. Refactor this just
         * slightly to reuse these bits.
         */
        si->size = 0;
        for(; phnum > 0; --phnum, ++phdr) {
#ifdef ANDROID_ARM_LINKER
            if(phdr->p_type == PT_ARM_EXIDX) {
                /* exidx entries (used for stack unwinding) are 8 bytes each.
                 */
                si->ARM_exidx = (unsigned *)phdr->p_vaddr;
                si->ARM_exidx_count = phdr->p_memsz / 8;
            }
#endif
            if (phdr->p_type == PT_LOAD) {
                /* For the executable, we use the si->size field only in
                   dl_unwind_find_exidx(), so the meaning of si->size
                   is not the size of the executable; it is the last
                   virtual address of the loadable part of the executable;
                   since si->base == 0 for an executable, we use the
                   range [0, si->size) to determine whether a PC value
                   falls within the executable section.  Of course, if
                   a value is below phdr->p_vaddr, it's not in the
                   executable section, but a) we shouldn't be asking for
                   such a value anyway, and b) if we have to provide
                   an EXIDX for such a value, then the executable's
                   EXIDX is probably the better choice.
                */
                DEBUG_DUMP_PHDR(phdr, "PT_LOAD", pid);
                if (phdr->p_vaddr + phdr->p_memsz > si->size)
                    si->size = phdr->p_vaddr + phdr->p_memsz;
                /* try to remember what range of addresses should be write
                 * protected */
                if (!(phdr->p_flags & PF_W)) {
                    unsigned _end;

                    if (phdr->p_vaddr < si->wrprotect_start)
                        si->wrprotect_start = phdr->p_vaddr;
                    /* Round the segment end up to a page boundary. */
                    _end = (((phdr->p_vaddr + phdr->p_memsz + PAGE_SIZE - 1) &
                             (~PAGE_MASK)));
                    if (_end > si->wrprotect_end)
                        si->wrprotect_end = _end;
                }
            } else if (phdr->p_type == PT_DYNAMIC) {
                /* (unsigned *)-1 is the "not found yet" sentinel set by
                 * load_library; a second PT_DYNAMIC is malformed. */
                if (si->dynamic != (unsigned *)-1) {
                    DL_ERR("%5d multiple PT_DYNAMIC segments found in '%s'. "
                          "Segment at 0x%08x, previously one found at 0x%08x",
                          pid, si->name, si->base + phdr->p_vaddr,
                          (unsigned)si->dynamic);
                    goto fail;
                }
                DEBUG_DUMP_PHDR(phdr, "PT_DYNAMIC", pid);
                si->dynamic = (unsigned *) (si->base + phdr->p_vaddr);
            }
        }
    }

    if (si->dynamic == (unsigned *)-1) {
        DL_ERR("%5d missing PT_DYNAMIC?!", pid);
        goto fail;
    }

    DEBUG("%5d dynamic = %p\n", pid, si->dynamic);

    /* extract useful information from dynamic section */
    /* The dynamic section is a sequence of (tag, value) word pairs ending
     * with a zero tag: the loop's d++ plus the switch's *d++ together
     * advance one pair per iteration. */
    for(d = si->dynamic; *d; d++){
        DEBUG("%5d d = %p, d[0] = 0x%08x d[1] = 0x%08x\n", pid, d, d[0], d[1]);
        switch(*d++){
        case DT_HASH:
            /* SysV hash table layout: nbucket, nchain, buckets, chains. */
            si->nbucket = ((unsigned *) (si->base + *d))[0];
            si->nchain = ((unsigned *) (si->base + *d))[1];
            si->bucket = (unsigned *) (si->base + *d + 8);
            si->chain = (unsigned *) (si->base + *d + 8 + si->nbucket * 4);
            break;
        case DT_STRTAB:
            si->strtab = (const char *) (si->base + *d);
            break;
        case DT_SYMTAB:
            si->symtab = (Elf32_Sym *) (si->base + *d);
            break;
        case DT_PLTREL:
            if(*d != DT_REL) {
                DL_ERR("DT_RELA not supported");
                goto fail;
            }
            break;
        case DT_JMPREL:
            si->plt_rel = (Elf32_Rel*) (si->base + *d);
            break;
        case DT_PLTRELSZ:
            /* Byte size divided by sizeof(Elf32_Rel) == 8. */
            si->plt_rel_count = *d / 8;
            break;
        case DT_REL:
            si->rel = (Elf32_Rel*) (si->base + *d);
            break;
        case DT_RELSZ:
            si->rel_count = *d / 8;
            break;
        case DT_PLTGOT:
            /* Save this in case we decide to do lazy binding. We don't yet. */
            si->plt_got = (unsigned *)(si->base + *d);
            break;
        case DT_DEBUG:
            // Set the DT_DEBUG entry to the address of _r_debug for GDB
            *d = (int) &_r_debug;
            break;
        case DT_RELA:
            DL_ERR("%5d DT_RELA not supported", pid);
            goto fail;
        case DT_INIT:
            si->init_func = (void (*)(void))(si->base + *d);
            DEBUG("%5d %s constructors (init func) found at %p\n",
                  pid, si->name, si->init_func);
            break;
        case DT_FINI:
            si->fini_func = (void (*)(void))(si->base + *d);
            DEBUG("%5d %s destructors (fini func) found at %p\n",
                  pid, si->name, si->fini_func);
            break;
        case DT_INIT_ARRAY:
            si->init_array = (unsigned *)(si->base + *d);
            DEBUG("%5d %s constructors (init_array) found at %p\n",
                  pid, si->name, si->init_array);
            break;
        case DT_INIT_ARRAYSZ:
            si->init_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
            break;
        case DT_FINI_ARRAY:
            si->fini_array = (unsigned *)(si->base + *d);
            DEBUG("%5d %s destructors (fini_array) found at %p\n",
                  pid, si->name, si->fini_array);
            break;
        case DT_FINI_ARRAYSZ:
            si->fini_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
            break;
        case DT_PREINIT_ARRAY:
            si->preinit_array = (unsigned *)(si->base + *d);
            DEBUG("%5d %s constructors (preinit_array) found at %p\n",
                  pid, si->name, si->preinit_array);
            break;
        case DT_PREINIT_ARRAYSZ:
            si->preinit_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
            break;
        case DT_TEXTREL:
            /* TODO: make use of this. */
            /* this means that we might have to write into where the text
             * segment was loaded during relocation... Do something with
             * it.
             */
            DEBUG("%5d Text segment should be writable during relocation.\n",
                  pid);
            break;
        }
    }

    DEBUG("%5d si->base = 0x%08x, si->strtab = %p, si->symtab = %p\n",
           pid, si->base, si->strtab, si->symtab);

    if((si->strtab == 0) || (si->symtab == 0)) {
        DL_ERR("%5d missing essential tables", pid);
        goto fail;
    }

    /* Load all DT_NEEDED dependencies before relocating against them. */
    for(d = si->dynamic; *d; d += 2) {
        if(d[0] == DT_NEEDED){
            DEBUG("%5d %s needs %s\n", pid, si->name, si->strtab + d[1]);
            soinfo *lsi = find_library(si->strtab + d[1]);
            if(lsi == 0) {
                strlcpy(tmp_err_buf, linker_get_error(), sizeof(tmp_err_buf));
                DL_ERR("%5d could not load needed library '%s' for '%s' (%s)",
                       pid, si->strtab + d[1], si->name, tmp_err_buf);
                goto fail;
            }
            /* Save the soinfo of the loaded DT_NEEDED library in the payload
               of the DT_NEEDED entry itself, so that we can retrieve the
               soinfo directly later from the dynamic segment.  This is a hack,
               but it allows us to map from DT_NEEDED to soinfo efficiently
               later on when we resolve relocations, trying to look up a symbol
               with dlsym().
            */
            d[1] = (unsigned)lsi;
            lsi->refcount++;
        }
    }

    if(si->plt_rel) {
        DEBUG("[ %5d relocating %s plt ]\n", pid, si->name );
        if(reloc_library(si, si->plt_rel, si->plt_rel_count))
            goto fail;
    }
    if(si->rel) {
        DEBUG("[ %5d relocating %s ]\n", pid, si->name );
        if(reloc_library(si, si->rel, si->rel_count))
            goto fail;
    }

    si->flags |= FLAG_LINKED;
    DEBUG("[ %5d finished linking %s ]\n", pid, si->name);

#if 0
    /* This is the way that the old dynamic linker did protection of
     * non-writable areas. It would scan section headers and find where
     * .text ended (rather where .data/.bss began) and assume that this is
     * the upper range of the non-writable area. This is too coarse,
     * and is kept here for reference until we fully move away from single
     * segment elf objects. See the code in get_wr_offset (also #if'd 0)
     * that made this possible.
     */
    if(wr_offset < 0xffffffff){
        mprotect((void*) si->base, wr_offset, PROT_READ | PROT_EXEC);
    }
#else
    /* TODO: Verify that this does the right thing in all cases, as it
     * presently probably does not. It is possible that an ELF image will
     * come with multiple read-only segments. What we ought to do is scan
     * the program headers again and mprotect all the read-only segments.
     * To prevent re-scanning the program header, we would have to build a
     * list of loadable segments in si, and then scan that instead. */
    /* Re-protect the range that load_segments temporarily made writable. */
    if (si->wrprotect_start != 0xffffffff && si->wrprotect_end != 0) {
        mprotect((void *)si->wrprotect_start,
                 si->wrprotect_end - si->wrprotect_start,
                 PROT_READ | PROT_EXEC);
    }
#endif

    /* If this is a SET?ID program, dup /dev/null to opened stdin,
       stdout and stderr to close a security hole described in:

    ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc

     */
    if (getuid() != geteuid() || getgid() != getegid())
        nullify_closed_stdio ();
    call_constructors(si);
    notify_gdb_of_load(si);
    return 0;

fail:
    ERROR("failed to link %s\n", si->name);
    si->flags |= FLAG_ERROR;
    return -1;
}
1753
1754static void parse_library_path(char *path, char *delim)
1755{
1756    size_t len;
1757    char *ldpaths_bufp = ldpaths_buf;
1758    int i = 0;
1759
1760    len = strlcpy(ldpaths_buf, path, sizeof(ldpaths_buf));
1761
1762    while (i < LDPATH_MAX && (ldpaths[i] = strsep(&ldpaths_bufp, delim))) {
1763        if (*ldpaths[i] != '\0')
1764            ++i;
1765    }
1766
1767    /* Forget the last path if we had to truncate; this occurs if the 2nd to
1768     * last char isn't '\0' (i.e. not originally a delim). */
1769    if (i > 0 && len >= sizeof(ldpaths_buf) &&
1770            ldpaths_buf[sizeof(ldpaths_buf) - 2] != '\0') {
1771        ldpaths[i - 1] = NULL;
1772    } else {
1773        ldpaths[i] = NULL;
1774    }
1775}
1776
/* Placeholder entry point.  The linker's real initialization happens in
 * __linker_init() below; this stub exists only so the image links with a
 * conventional main symbol, and it deliberately does nothing. */
int main(int argc, char **argv)
{
    (void) argc;   /* unused */
    (void) argv;   /* unused */
    return 0;
}
1781
/* Number of TLS slots, taken from the private bionic_tls.h header. */
#define ANDROID_TLS_SLOTS  BIONIC_TLS_SLOTS

/* Bootstrap TLS area installed by __linker_init() so that errno works
 * for system calls made before the C library sets up the real TLS. */
static void * __tls_area[ANDROID_TLS_SLOTS];
1785
1786unsigned __linker_init(unsigned **elfdata)
1787{
1788    static soinfo linker_soinfo;
1789
1790    int argc = (int) *elfdata;
1791    char **argv = (char**) (elfdata + 1);
1792    unsigned *vecs = (unsigned*) (argv + argc + 1);
1793    soinfo *si;
1794    struct link_map * map;
1795    char *ldpath_env = NULL;
1796
1797    /* Setup a temporary TLS area that is used to get a working
1798     * errno for system calls.
1799     */
1800    __set_tls(__tls_area);
1801
1802    pid = getpid();
1803
1804#if TIMING
1805    struct timeval t0, t1;
1806    gettimeofday(&t0, 0);
1807#endif
1808
1809    /* NOTE: we store the elfdata pointer on a special location
1810     *       of the temporary TLS area in order to pass it to
1811     *       the C Library's runtime initializer.
1812     *
1813     *       The initializer must clear the slot and reset the TLS
1814     *       to point to a different location to ensure that no other
1815     *       shared library constructor can access it.
1816     */
1817    __tls_area[TLS_SLOT_BIONIC_PREINIT] = elfdata;
1818
1819    debugger_init();
1820
1821        /* skip past the environment */
1822    while(vecs[0] != 0) {
1823        if(!strncmp((char*) vecs[0], "DEBUG=", 6)) {
1824            debug_verbosity = atoi(((char*) vecs[0]) + 6);
1825        } else if(!strncmp((char*) vecs[0], "LD_LIBRARY_PATH=", 16)) {
1826            ldpath_env = (char*) vecs[0] + 16;
1827        }
1828        vecs++;
1829    }
1830    vecs++;
1831
1832    INFO("[ android linker & debugger ]\n");
1833    DEBUG("%5d elfdata @ 0x%08x\n", pid, (unsigned)elfdata);
1834
1835    si = alloc_info(argv[0]);
1836    if(si == 0) {
1837        exit(-1);
1838    }
1839
1840        /* bootstrap the link map, the main exe always needs to be first */
1841    si->flags |= FLAG_EXE;
1842    map = &(si->linkmap);
1843
1844    map->l_addr = 0;
1845    map->l_name = argv[0];
1846    map->l_prev = NULL;
1847    map->l_next = NULL;
1848
1849    _r_debug.r_map = map;
1850    r_debug_tail = map;
1851
1852        /* gdb expects the linker to be in the debug shared object list,
1853         * and we need to make sure that the reported load address is zero.
1854         * Without this, gdb gets the wrong idea of where rtld_db_dlactivity()
1855         * is.  Don't use alloc_info(), because the linker shouldn't
1856         * be on the soinfo list.
1857         */
1858    strcpy((char*) linker_soinfo.name, "/system/bin/linker");
1859    linker_soinfo.flags = 0;
1860    linker_soinfo.base = 0;     // This is the important part; must be zero.
1861    insert_soinfo_into_debug_map(&linker_soinfo);
1862
1863        /* extract information passed from the kernel */
1864    while(vecs[0] != 0){
1865        switch(vecs[0]){
1866        case AT_PHDR:
1867            si->phdr = (Elf32_Phdr*) vecs[1];
1868            break;
1869        case AT_PHNUM:
1870            si->phnum = (int) vecs[1];
1871            break;
1872        case AT_ENTRY:
1873            si->entry = vecs[1];
1874            break;
1875        }
1876        vecs += 2;
1877    }
1878
1879    ba_init();
1880
1881    si->base = 0;
1882    si->dynamic = (unsigned *)-1;
1883    si->wrprotect_start = 0xffffffff;
1884    si->wrprotect_end = 0;
1885
1886        /* Use LD_LIBRARY_PATH if we aren't setuid/setgid */
1887    if (ldpath_env && getuid() == geteuid() && getgid() == getegid())
1888        parse_library_path(ldpath_env, ":");
1889
1890    if(link_image(si, 0)) {
1891        char errmsg[] = "CANNOT LINK EXECUTABLE\n";
1892        write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
1893        write(2, errmsg, sizeof(errmsg));
1894        exit(-1);
1895    }
1896
1897#if TIMING
1898    gettimeofday(&t1,NULL);
1899    PRINT("LINKER TIME: %s: %d microseconds\n", argv[0], (int) (
1900               (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
1901               (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)
1902               ));
1903#endif
1904#if STATS
1905    PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol\n", argv[0],
1906           linker_stats.reloc[RELOC_ABSOLUTE],
1907           linker_stats.reloc[RELOC_RELATIVE],
1908           linker_stats.reloc[RELOC_COPY],
1909           linker_stats.reloc[RELOC_SYMBOL]);
1910#endif
1911#if COUNT_PAGES
1912    {
1913        unsigned n;
1914        unsigned i;
1915        unsigned count = 0;
1916        for(n = 0; n < 4096; n++){
1917            if(bitmask[n]){
1918                unsigned x = bitmask[n];
1919                for(i = 0; i < 8; i++){
1920                    if(x & 1) count++;
1921                    x >>= 1;
1922                }
1923            }
1924        }
1925        PRINT("PAGES MODIFIED: %s: %d (%dKB)\n", argv[0], count, count * 4);
1926    }
1927#endif
1928
1929#if TIMING || STATS || COUNT_PAGES
1930    fflush(stdout);
1931#endif
1932
1933    TRACE("[ %5d Ready to execute '%s' @ 0x%08x ]\n", pid, si->name,
1934          si->entry);
1935    return si->entry;
1936}
1937