// linker_phdr.cpp, revision c620059479c47a78d57086d73726c9adc2f337ad
/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <sys/mman.h>

#include "linker.h"
#include "linker_debug.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of Elf_Phdr for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead the
  loader decides where to load the first segment, then will load all others
  relative to the first one to respect the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
          possible wrap-around at UINT32_MAX for large p_vaddr values.)

  The phdr0_load_address must start at a page boundary, with the
  segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
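
// A minimal sketch of the bias computation described above, kept as a
// comment because it is illustrative only: the linker computes the bias
// inline in ElfReader::ReserveAddressSpace below. 'ComputeLoadBias' is a
// hypothetical name, and 'phdr0_load_address' is assumed to be the
// page-aligned address at which the first segment's pages were actually
// mapped.
//
//     static Elf_Addr ComputeLoadBias(Elf_Addr phdr0_load_address,
//                                     const Elf_Phdr* phdr0) {
//       return phdr0_load_address - PAGE_START(phdr0->p_vaddr);
//     }
//
// With the example above: 0xa0030000 - PAGE_START(0x30000) == 0xa0000000,
// and adding that bias to each p_vaddr yields the load address ranges shown.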

#define MAYBE_MAP_FLAG(x,from,to)    (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
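
// For example, a typical code segment with p_flags == (PF_R | PF_X) yields
// PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC), while a data
// segment with (PF_R | PF_W) yields (PROT_READ | PROT_WRITE).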

ElfReader::ElfReader(const char* name, int fd)
    : name_(name), fd_(fd),
      phdr_num_(0), phdr_mmap_(NULL), phdr_table_(NULL), phdr_size_(0),
      load_start_(NULL), load_size_(0), load_bias_(0),
      loaded_phdr_(NULL) {
}

ElfReader::~ElfReader() {
  if (fd_ != -1) {
    close(fd_);
  }
  if (phdr_mmap_ != NULL) {
    munmap(phdr_mmap_, phdr_size_);
  }
}

bool ElfReader::Load() {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace() &&
         LoadSegments() &&
         FindPhdr();
}

bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(read(fd_, &header_, sizeof(header_)));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    return false;
  }
  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_,
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (header_.e_ident[EI_MAG0] != ELFMAG0 ||
      header_.e_ident[EI_MAG1] != ELFMAG1 ||
      header_.e_ident[EI_MAG2] != ELFMAG2 ||
      header_.e_ident[EI_MAG3] != ELFMAG3) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  if (header_.e_ident[EI_CLASS] != ELFCLASS32) {
    DL_ERR("\"%s\" not 32-bit: %d", name_, header_.e_ident[EI_CLASS]);
    return false;
  }
  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine !=
#ifdef ANDROID_ARM_LINKER
      EM_ARM
#elif defined(ANDROID_MIPS_LINKER)
      EM_MIPS
#elif defined(ANDROID_X86_LINKER)
      EM_386
#endif
  ) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only private
// mmap-ed block. (The mapping is file-backed, not anonymous.)
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf_Phdr)) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
    return false;
  }

  Elf_Addr page_min = PAGE_START(header_.e_phoff);
  Elf_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf_Phdr)));
  Elf_Addr page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}
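
// For example (illustrative numbers, assuming 4096-byte pages and 32-byte
// Elf32_Phdr entries): with e_phoff == 0x34 and e_phnum == 8, the table
// occupies file bytes [0x34, 0x134), so page_min == 0, page_max == 0x1000
// and page_offset == 0x34. A single page is mapped, and phdr_table_ points
// 0x34 bytes into it.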

/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are non-NULL, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const Elf_Phdr* phdr_table, size_t phdr_count,
                                Elf_Addr* out_min_vaddr,
                                Elf_Addr* out_max_vaddr) {
    Elf_Addr min_vaddr = 0xFFFFFFFFU;
    Elf_Addr max_vaddr = 0x00000000U;

    bool found_pt_load = false;
    for (size_t i = 0; i < phdr_count; ++i) {
        const Elf_Phdr* phdr = &phdr_table[i];

        if (phdr->p_type != PT_LOAD) {
            continue;
        }
        found_pt_load = true;

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }
    if (!found_pt_load) {
        min_vaddr = 0x00000000U;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    if (out_min_vaddr != NULL) {
        *out_min_vaddr = min_vaddr;
    }
    if (out_max_vaddr != NULL) {
        *out_max_vaddr = max_vaddr;
    }
    return max_vaddr - min_vaddr;
}
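
// With the two example segments from the technical note above (vaddr ranges
// 0x30000...0x34000 and 0x40000...0x48000, both page-aligned), this returns
// 0x48000 - 0x30000 == 0x18000 and sets *out_min_vaddr to 0x30000: the
// reservation must also span the unused gap between the two segments.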

// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace() {
  Elf_Addr min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  void* start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
  if (start == MAP_FAILED) {
    DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_);
    return false;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}
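
// Continuing the example: min_vaddr (0x30000) is only a hint, so if the
// kernel instead places the reservation at, say, 0xa0030000, load_bias_
// becomes 0xa0030000 - 0x30000 == 0xa0000000, matching the bias derivation
// in the technical note above.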

// Map all loadable segments in process' address space.
// This assumes you already called ReserveAddressSpace to
// reserve the address space range for the library.
// TODO: assert assumption.
bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const Elf_Phdr* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    Elf_Addr seg_start = phdr->p_vaddr + load_bias_;
    Elf_Addr seg_end   = seg_start + phdr->p_memsz;

    Elf_Addr seg_page_start = PAGE_START(seg_start);
    Elf_Addr seg_page_end   = PAGE_END(seg_end);

    Elf_Addr seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    Elf_Addr file_start = phdr->p_offset;
    Elf_Addr file_end   = file_start + phdr->p_filesz;

    Elf_Addr file_page_start = PAGE_START(file_start);
    Elf_Addr file_length = file_end - file_page_start;

    if (file_length != 0) {
      void* seg_addr = mmap((void*)seg_page_start,
                            file_length,
                            PFLAGS_TO_PROT(phdr->p_flags),
                            MAP_FIXED|MAP_PRIVATE,
                            fd_,
                            file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_, i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}
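
// Worked example for the second segment of the technical note (offset:0x4000,
// filesz:0x2000, memsz:0x8000, vaddr:0x40000), with load_bias_ == 0xa0000000
// and 4096-byte pages:
//   - the file-backed mmap() covers 0xa0040000..0xa0042000, backed by file
//     pages 0x4000..0x6000;
//   - seg_file_end == 0xa0042000 is already page-aligned, so no memset() of
//     a partial page is needed;
//   - the anonymous zero map covers 0xa0042000..0xa0048000, providing the
//     zero-initialized p_memsz - p_filesz tail.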

/* Used internally by phdr_table_protect_segments and
 * phdr_table_unprotect_segments to set the protection bits of all loaded
 * segments, with optional extra flags (e.g. PROT_WRITE).
 */
static int _phdr_table_set_load_prot(const Elf_Phdr* phdr_table, size_t phdr_count,
                                     Elf_Addr load_bias, int extra_prot_flags) {
    const Elf_Phdr* phdr = phdr_table;
    const Elf_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        // Writable segments keep their protections; only the read-only
        // PT_LOAD segments are adjusted here.
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
    return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
    return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
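
// A sketch of the intended call sequence around relocation (illustrative
// only; 'si' stands for the per-library bookkeeping the linker keeps,
// assumed to hold the phdr table address, its entry count and the load
// bias):
//
//   phdr_table_unprotect_segments(si->phdr, si->phnum, si->load_bias);
//   ... apply all relocations ...
//   phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias);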

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const Elf_Phdr* phdr_table, size_t phdr_count,
                                          Elf_Addr load_bias, int prot_flags) {
    const Elf_Phdr* phdr = phdr_table;
    const Elf_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and mark every page touched by the segment as read-only.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's point of view, but contain absolute
 * addresses that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
    return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}
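
// Continuing the sketch above: once all relocations have been applied and
// the original protections restored, relro protection is the final step
// (illustrative only, same assumed 'si' bookkeeping):
//
//   phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias);
//   phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias);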

#ifdef ANDROID_ARM_LINKER

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const Elf_Phdr* phdr_table, size_t phdr_count,
                             Elf_Addr load_bias,
                             Elf_Addr** arm_exidx, unsigned* arm_exidx_count) {
    const Elf_Phdr* phdr = phdr_table;
    const Elf_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_ARM_EXIDX)
            continue;

        *arm_exidx = (Elf_Addr*)(load_bias + phdr->p_vaddr);
        // Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes.
        *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
        return 0;
    }
    *arm_exidx = NULL;
    *arm_exidx_count = 0;
    return -1;
}
#endif /* ANDROID_ARM_LINKER */

/* Find the address and size of the ELF file's .dynamic section in memory;
 * *dynamic is set to NULL if the section is missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (NULL on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const Elf_Phdr* phdr_table, size_t phdr_count,
                                    Elf_Addr load_bias,
                                    Elf_Dyn** dynamic, size_t* dynamic_count, Elf_Word* dynamic_flags) {
    const Elf_Phdr* phdr = phdr_table;
    const Elf_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

        *dynamic = reinterpret_cast<Elf_Dyn*>(load_bias + phdr->p_vaddr);
        if (dynamic_count) {
            // Each 32-bit Elf_Dyn entry is 8 bytes: a tag word plus a value word.
            *dynamic_count = (unsigned)(phdr->p_memsz / 8);
        }
        if (dynamic_flags) {
            *dynamic_flags = phdr->p_flags;
        }
        return;
    }
    *dynamic = NULL;
    if (dynamic_count) {
        *dynamic_count = 0;
    }
}

// Locates the address of the program header table as it appears in the
// loaded segments in memory, and stores it in loaded_phdr_. This is in
// contrast with 'phdr_table_', which is temporary and will be released
// before the library is relocated.
bool ElfReader::FindPhdr() {
  const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        Elf_Addr  elf_addr = load_bias_ + phdr->p_vaddr;
        const Elf_Ehdr* ehdr = (const Elf_Ehdr*)(void*)elf_addr;
        Elf_Addr  offset = ehdr->e_phoff;
        return CheckPhdr((Elf_Addr)ehdr + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(Elf_Addr loaded) {
  const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_;
  Elf_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf_Phdr));
  for (Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    Elf_Addr seg_start = phdr->p_vaddr + load_bias_;
    // Use p_filesz rather than p_memsz so that the candidate phdr table is
    // required to lie in the file-backed portion of the segment, not in its
    // zero-filled tail.
    Elf_Addr seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const Elf_Phdr*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %x not in loadable segment", name_, loaded);
  return false;
}