linker_phdr.cpp revision 82dcc7910d9c25c4fdf635d6132fa86ae3677363
/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of Elf32_Phdr for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap-around at UINT32_MAX for large p_vaddr values.)

  Also, phdr0_load_address must start at a page boundary, with the
  segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
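
/* A worked numeric example of the bias computation above (illustrative
 * values only, assuming 4096-byte pages): if phdr0 has p_vaddr == 0x30123
 * and the loader places it at phdr0_load_address == 0xa0030000, then
 *
 *      load_bias = 0xa0030000 - PAGE_START(0x30123)
 *                = 0xa0030000 - 0x30000
 *                = 0xa0000000
 *
 * and the segment's first content byte lives at
 * 0xa0030000 + PAGE_OFFSET(0x30123) == 0xa0030123 == load_bias + p_vaddr.
 */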

#define MAYBE_MAP_FLAG(x,from,to)    (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
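
/* For example, a typical text segment with p_flags == (PF_R | PF_X) maps to
 * PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC).
 */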

ElfReader::ElfReader(const char* name, int fd)
    : name_(name), fd_(fd),
      phdr_num_(0), phdr_mmap_(NULL), phdr_table_(NULL), phdr_size_(0),
      load_start_(NULL), load_size_(0), load_bias_(0),
      loaded_phdr_(NULL) {
}

ElfReader::~ElfReader() {
  if (fd_ != -1) {
    close(fd_);
  }
  if (phdr_mmap_ != NULL) {
    munmap(phdr_mmap_, phdr_size_);
  }
}

bool ElfReader::Load() {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace() &&
         LoadSegments() &&
         FindPhdr();
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(read(fd_, &header_, sizeof(header_)));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    return false;
  }
  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable", name_);
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (header_.e_ident[EI_MAG0] != ELFMAG0 ||
      header_.e_ident[EI_MAG1] != ELFMAG1 ||
      header_.e_ident[EI_MAG2] != ELFMAG2 ||
      header_.e_ident[EI_MAG3] != ELFMAG3) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  if (header_.e_ident[EI_CLASS] != ELFCLASS32) {
    DL_ERR("\"%s\" not 32-bit: %d", name_, header_.e_ident[EI_CLASS]);
    return false;
  }
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine !=
#ifdef ANDROID_ARM_LINKER
      EM_ARM
#elif defined(ANDROID_MIPS_LINKER)
      EM_MIPS
#elif defined(ANDROID_X86_LINKER)
      EM_386
#endif
  ) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// mapping (file-backed, not anonymous: it maps fd_ directly).
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", name_, phdr_num_);
    return false;
  }

  Elf32_Addr page_min = PAGE_START(header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
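
/* Illustrative numbers for the page arithmetic above (not from any real
 * file), assuming 4096-byte pages: with e_phoff == 52 and e_phnum == 8,
 *
 *      page_min    = PAGE_START(52)                        = 0
 *      page_max    = PAGE_END(52 + 8 * sizeof(Elf32_Phdr))
 *                  = PAGE_END(52 + 8 * 32) = PAGE_END(308) = 4096
 *      page_offset = PAGE_OFFSET(52)                       = 52
 *
 * so a single page is mapped, and phdr_table_ points 52 bytes into it.
 */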

/* Compute the extent of all loadable segments in an ELF program header
 * table. This corresponds to the page-aligned size in bytes that needs to be
 * reserved in the process' address space.
 *
 * This returns 0 if there are no loadable segments.
 */
Elf32_Addr phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                    size_t phdr_count)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }

    if (min_vaddr > max_vaddr) {
        return 0;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    return max_vaddr - min_vaddr;
}
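
/* Sanity check using the example from the technical note above: PT_LOAD
 * ranges [0x30000, 0x34000) and [0x40000, 0x48000) give min_vaddr == 0x30000
 * and max_vaddr == 0x48000, so this returns
 * PAGE_END(0x48000) - PAGE_START(0x30000) == 0x18000 bytes.
 */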

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace() {
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  void* start = mmap(NULL, load_size_, PROT_NONE, mmap_flags, -1, 0);
  if (start == MAP_FAILED) {
    DL_ERR("couldn't reserve %d bytes of address space for \"%s\"", load_size_, name_);
    return false;
  }

  load_start_ = start;
  load_bias_ = 0;

  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_LOAD) {
      load_bias_ = reinterpret_cast<Elf32_Addr>(start) - PAGE_START(phdr->p_vaddr);
      break;
    }
  }
  return true;
}

// Map all loadable segments in process' address space.
// This assumes you already called ReserveAddressSpace to
// reserve the address space range for the library.
// TODO: assert assumption.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

    Elf32_Addr seg_page_start = PAGE_START(seg_start);
    Elf32_Addr seg_page_end   = PAGE_END(seg_end);

    Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    Elf32_Addr file_start = phdr->p_offset;
    Elf32_Addr file_end   = file_start + phdr->p_filesz;

    Elf32_Addr file_page_start = PAGE_START(file_start);
    Elf32_Addr file_length = file_end - file_page_start;

    if (file_length != 0) {
      void* seg_addr = mmap((void*)seg_page_start,
                            file_length,
                            PFLAGS_TO_PROT(phdr->p_flags),
                            MAP_FIXED|MAP_PRIVATE,
                            fd_,
                            file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %d: %s", name_, i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
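
/* To make the mapping logic above concrete, consider a writable segment
 * with seg_start == 0xa0040000 (page-aligned, illustrative value only),
 * p_filesz == 0x2000 and p_memsz == 0x8000, with 4096-byte pages:
 *
 *   1. The file-backed mmap() covers [0xa0040000, 0xa0042000).
 *   2. seg_file_end == 0xa0042000 is page-aligned, so no trailing memset
 *      is needed; if p_filesz were 0x1f00 instead, the last 0x100 bytes
 *      of the final file page would be zeroed by hand.
 *   3. The remaining [0xa0042000, 0xa0048000) is covered by the anonymous
 *      MAP_FIXED mapping, which the kernel guarantees to be zero-filled.
 */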

/* Used internally to set the protection bits of all loaded segments
 * with optional extra flags (currently only PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 * Note that segments that are already writable are left untouched.
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int               phdr_count,
                          Elf32_Addr        load_bias,
                          int               extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
                            int               phdr_count,
                            Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                      load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
                              int               phdr_count,
                              Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                      load_bias, PROT_WRITE);
}
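
/* A minimal sketch of the intended call sequence around relocation
 * (apply_relocations is a hypothetical placeholder, not a function
 * defined in this file):
 *
 *     if (phdr_table_unprotect_segments(phdr, phdr_count, load_bias) < 0) {
 *         return -1;   // errno describes the mprotect() failure
 *     }
 *     apply_relocations(...);   // patch addresses in the now-writable segments
 *     if (phdr_table_protect_segments(phdr, phdr_count, load_bias) < 0) {
 *         return -1;
 *     }
 */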

/* Used internally by phdr_table_protect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               int               prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and mark every page touched by the segment as read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's POV, but contain absolute addresses
 * that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
                             int               phdr_count,
                             Elf32_Addr        load_bias)
{
    return _phdr_table_set_gnu_relro_prot(phdr_table,
                                          phdr_count,
                                          load_bias,
                                          PROT_READ);
}
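
/* A worked example of the over-protective rounding described above
 * (illustrative numbers, 4096-byte pages): a PT_GNU_RELRO entry with
 * p_vaddr == 0x1000 and p_memsz == 0x2345 protects
 *
 *      [PAGE_START(0x1000), PAGE_END(0x1000 + 0x2345)) == [0x1000, 0x4000)
 *
 * (plus load_bias), i.e. three full pages, even though the segment itself
 * ends mid-page at 0x3345.
 */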

#ifdef ANDROID_ARM_LINKER

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int
phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         Elf32_Addr**      arm_exidx,
                         unsigned*         arm_exidx_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_ARM_EXIDX)
            continue;

        *arm_exidx = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
        return 0;
    }
    *arm_exidx = NULL;
    *arm_exidx_count = 0;
    return -1;
}
#endif /* ANDROID_ARM_LINKER */
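
/* Note on the division by 8 above: each .ARM.exidx entry is a pair of
 * 32-bit words (an offset to a function, plus either an inline unwind
 * description or a pointer into .ARM.extab), so p_memsz / 8 is the
 * entry count.
 */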

/* Return the address and size of the ELF file's .dynamic section in memory,
 * or set *dynamic to NULL if it is missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (NULL on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void
phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               Elf32_Dyn**       dynamic,
                               size_t*           dynamic_count,
                               Elf32_Word*       dynamic_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

        *dynamic = reinterpret_cast<Elf32_Dyn*>(load_bias + phdr->p_vaddr);
        if (dynamic_count) {
            *dynamic_count = (unsigned)(phdr->p_memsz / 8);
        }
        if (dynamic_flags) {
            *dynamic_flags = phdr->p_flags;
        }
        return;
    }
    *dynamic = NULL;
    if (dynamic_count) {
        *dynamic_count = 0;
    }
}
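
/* The dynamic entry count above divides p_memsz by 8 because
 * sizeof(Elf32_Dyn) == 8: a 32-bit d_tag plus a 32-bit d_un value.
 */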

// Returns the address of the program header table as it appears in the loaded
// segments in memory. This is in contrast with 'phdr_table_' which
// is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        Elf32_Addr  elf_addr = load_bias_ + phdr->p_vaddr;
        const Elf32_Ehdr* ehdr = (const Elf32_Ehdr*)(void*)elf_addr;
        Elf32_Addr  offset = ehdr->e_phoff;
        return CheckPhdr((Elf32_Addr)ehdr + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(Elf32_Addr loaded) {
  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;
  Elf32_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf32_Phdr));
  for (Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const Elf32_Phdr*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %x not in loadable segment", name_, loaded);
  return false;
}