linker_phdr.cpp revision 650be4e584eeab3591b9e273bfd6d169eea60853
/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "linker.h"
#include "linker_debug.h"
/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to the portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of Elf32_Phdr for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the segments with a constant bias of
  0xa0000000, then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do not necessarily start at page boundaries.
  Since we can only memory-map at page boundaries, the bias is computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
         possible wrap-around for large p_vaddr values.)

  phdr0_load_address must itself start at a page boundary, with the
  segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
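/* As a minimal worked sketch of the formula above (the addresses simply
 * reuse the example list and are not values the loader guarantees):
 *
 *     // first PT_LOAD: p_vaddr == 0x30000, its page was mapped at 0xa0030000
 *     Elf32_Addr phdr0_load_address = 0xa0030000;
 *     Elf32_Addr load_bias = phdr0_load_address - PAGE_START(0x30000);  // == 0xa0000000
 *
 *     // any p_vaddr read from the file is then translated with the bias,
 *     // e.g. 0x30000 -> 0xa0030000 and 0x40000 -> 0xa0040000.
 */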

#define MAYBE_MAP_FLAG(x,from,to)    (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
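// For example (illustrative only): a text segment with p_flags == (PF_R | PF_X)
// expands to (PROT_READ | PROT_EXEC), and a data segment with (PF_R | PF_W)
// expands to (PROT_READ | PROT_WRITE).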

ElfReader::ElfReader(const char* name, int fd)
    : name_(name), fd_(fd),
      phdr_num_(0), phdr_mmap_(NULL), phdr_table_(NULL), phdr_size_(0),
      load_start_(NULL), load_size_(0), load_bias_(0),
      loaded_phdr_(NULL) {
}

ElfReader::~ElfReader() {
  if (fd_ != -1) {
    close(fd_);
  }
  if (phdr_mmap_ != NULL) {
    munmap(phdr_mmap_, phdr_size_);
  }
}

bool ElfReader::Load() {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace() &&
         LoadSegments() &&
         FindPhdr();
}
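// A hedged usage sketch of this class: the linker proper (load_library() in
// linker.cpp) is expected to drive it roughly as follows. The accessor names
// and error handling shown here are illustrative, not code from this file:
//
//     ElfReader elf_reader(name, fd);
//     if (!elf_reader.Load()) {
//         return NULL;  // DL_ERR() has already recorded the failure
//     }
//     // elf_reader.load_start(), load_size(), load_bias() and loaded_phdr()
//     // then describe the freshly mapped library.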

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(read(fd_, &header_, sizeof(header_)));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    return false;
  }
  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable", name_);
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (header_.e_ident[EI_MAG0] != ELFMAG0 ||
      header_.e_ident[EI_MAG1] != ELFMAG1 ||
      header_.e_ident[EI_MAG2] != ELFMAG2 ||
      header_.e_ident[EI_MAG3] != ELFMAG3) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  if (header_.e_ident[EI_CLASS] != ELFCLASS32) {
    DL_ERR("\"%s\" not 32-bit: %d", name_, header_.e_ident[EI_CLASS]);
    return false;
  }
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine !=
#ifdef ANDROID_ARM_LINKER
      EM_ARM
#elif defined(ANDROID_MIPS_LINKER)
      EM_MIPS
#elif defined(ANDROID_X86_LINKER)
      EM_386
#endif
  ) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// file-backed mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf32_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %d", name_, phdr_num_);
    return false;
  }

  Elf32_Addr page_min = PAGE_START(header_.e_phoff);
  Elf32_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf32_Phdr)));
  Elf32_Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<Elf32_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
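// Worked example with illustrative numbers: if e_phoff == 0x34 and
// phdr_num_ == 8, then page_min == 0, page_offset == 0x34 and
// page_max == PAGE_END(0x34 + 8 * sizeof(Elf32_Phdr)) == 0x1000 (with 4KiB
// pages), so a single page is mapped and phdr_table_ points 0x34 bytes
// into it.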

/* Compute the extent of all loadable segments in an ELF program header
 * table. This corresponds to the page-aligned size in bytes that needs to be
 * reserved in the process' address space.
 *
 * This returns 0 if there are no loadable segments.
 */
Elf32_Addr phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                    size_t phdr_count)
{
    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf32_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }

    if (min_vaddr > max_vaddr) {
        return 0;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    return max_vaddr - min_vaddr;
}
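/* Worked example: with the two segments from the technical note above
 * (0x30000...0x34000 and 0x40000...0x48000), min_vaddr rounds down to
 * 0x30000, max_vaddr rounds up to 0x48000, and the function returns
 * 0x18000 bytes. (Illustrative numbers only.)
 */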

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace() {
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  void* start = mmap(NULL, load_size_, PROT_NONE, mmap_flags, -1, 0);
  if (start == MAP_FAILED) {
    DL_ERR("couldn't reserve %d bytes of address space for \"%s\"", load_size_, name_);
    return false;
  }

  load_start_ = start;
  load_bias_ = 0;

  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];
    if (phdr->p_type == PT_LOAD) {
      load_bias_ = reinterpret_cast<Elf32_Addr>(start) - PAGE_START(phdr->p_vaddr);
      break;
    }
  }
  return true;
}

// Map all loadable segments in process' address space.
// This assumes you already called ReserveAddressSpace to
// reserve the address space range for the library.
// TODO: assert assumption.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf32_Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

    Elf32_Addr seg_page_start = PAGE_START(seg_start);
    Elf32_Addr seg_page_end   = PAGE_END(seg_end);

    Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    Elf32_Addr file_start = phdr->p_offset;
    Elf32_Addr file_end   = file_start + phdr->p_filesz;

    Elf32_Addr file_page_start = PAGE_START(file_start);

    void* seg_addr = mmap((void*)seg_page_start,
                          file_end - file_page_start,
                          PFLAGS_TO_PROT(phdr->p_flags),
                          MAP_FIXED|MAP_PRIVATE,
                          fd_,
                          file_page_start);
    if (seg_addr == MAP_FAILED) {
      DL_ERR("couldn't map \"%s\" segment %d: %s", name_, i, strerror(errno));
      return false;
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
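// Worked example with the second segment from the technical note
// (filesz 0x2000, memsz 0x8000, mapped at 0xa0040000; illustrative numbers):
// seg_file_end starts at 0xa0042000, which is already page-aligned, so no
// partial-page memset is needed, and the anonymous mapping covers
// 0xa0042000...0xa0048000 to provide the zero-initialized tail (e.g. .bss).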

/* Used internally by phdr_table_protect_segments and
 * phdr_table_unprotect_segments to set the protection bits of all loaded
 * segments, with optional extra flags (in practice, only PROT_WRITE).
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int               phdr_count,
                          Elf32_Addr        load_bias,
                          int               extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
                            int               phdr_count,
                            Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                      load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
                              int               phdr_count,
                              Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                      load_bias, PROT_WRITE);
}
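/* A hedged sketch of the expected calling sequence around relocation.
 * The 'si' (soinfo) fields and the relocation step are illustrative,
 * not code from this file:
 *
 *     phdr_table_unprotect_segments(si->phdr, si->phnum, si->load_bias);
 *     // ... apply all relocations to the now-writable segments ...
 *     phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias);
 *     phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias);
 */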

/* Used internally by phdr_table_protect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               int               prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and make every page touched by the segment read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's point of view, but contain absolute
 * addresses that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
                             int               phdr_count,
                             Elf32_Addr        load_bias)
{
    return _phdr_table_set_gnu_relro_prot(phdr_table,
                                          phdr_count,
                                          load_bias,
                                          PROT_READ);
}

#ifdef ANDROID_ARM_LINKER

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and number of entries of the .ARM.exidx section
 * in memory, if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int
phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         Elf32_Addr**      arm_exidx,
                         unsigned*         arm_exidx_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_ARM_EXIDX)
            continue;

        *arm_exidx = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);   /* 8 bytes per .ARM.exidx entry */
        return 0;
    }
    *arm_exidx = NULL;
    *arm_exidx_count = 0;
    return -1;
}
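/* A hedged usage sketch (the exception-unwinding support in the linker,
 * e.g. dl_unwind_find_exidx(), is the expected caller; the 'si' fields
 * shown are illustrative):
 *
 *     Elf32_Addr* exidx;
 *     unsigned exidx_count;
 *     phdr_table_get_arm_exidx(si->phdr, si->phnum, si->load_bias,
 *                              &exidx, &exidx_count);
 *     // exidx == NULL and exidx_count == 0 if there is no .ARM.exidx
 */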
#endif /* ANDROID_ARM_LINKER */

/* Return the address and number of entries of the ELF file's .dynamic
 * section in memory, or NULL if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (NULL on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void
phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               Elf32_Dyn**       dynamic,
                               size_t*           dynamic_count,
                               Elf32_Word*       dynamic_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

        *dynamic = reinterpret_cast<Elf32_Dyn*>(load_bias + phdr->p_vaddr);
        if (dynamic_count) {
            *dynamic_count = (unsigned)(phdr->p_memsz / 8);   /* 8 == sizeof(Elf32_Dyn) */
        }
        if (dynamic_flags) {
            *dynamic_flags = phdr->p_flags;
        }
        return;
    }
    *dynamic = NULL;
    if (dynamic_count) {
        *dynamic_count = 0;
    }
}
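/* A hedged usage sketch (linking code such as soinfo_link_image() in
 * linker.cpp is the expected caller; the 'si' fields shown are
 * illustrative, not code from this file):
 *
 *     Elf32_Dyn* dynamic;
 *     size_t dynamic_count;
 *     Elf32_Word dynamic_flags;
 *     phdr_table_get_dynamic_section(si->phdr, si->phnum, si->load_bias,
 *                                    &dynamic, &dynamic_count, &dynamic_flags);
 *     if (dynamic == NULL) {
 *         // no PT_DYNAMIC segment: this is not a usable dynamic library
 *     }
 */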

// Finds the address of the program header table as it appears in the loaded
// segments in memory, and stores it in loaded_phdr_. This is in contrast
// with 'phdr_table_', which is temporary and will be released before the
// library is relocated.
bool ElfReader::FindPhdr() {
  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        Elf32_Addr  elf_addr = load_bias_ + phdr->p_vaddr;
        const Elf32_Ehdr* ehdr = (const Elf32_Ehdr*)(void*)elf_addr;
        Elf32_Addr  offset = ehdr->e_phoff;
        return CheckPhdr((Elf32_Addr)ehdr + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(Elf32_Addr loaded) {
  const Elf32_Phdr* phdr_limit = phdr_table_ + phdr_num_;
  Elf32_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf32_Phdr));
  for (Elf32_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    Elf32_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf32_Addr seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const Elf32_Phdr*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %x not in loadable segment", name_, loaded);
  return false;
}