linker_phdr.cpp revision 0266ae5f884d72da58f33a072e865ba131234a5e
/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <errno.h>
#include <machine/exec.h>
#include <string.h>  // memset, strerror
#include <sys/mman.h>
#include <unistd.h>  // close, read

#include "linker.h"
#include "linker_debug.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to it to preserve the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0000000
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: this value must be used as an unsigned integer so that large
         p_vaddr values simply wrap around, e.g. past UINT32_MAX on
         32-bit targets.)

  Also, phdr0_load_address must start at a page boundary, with the
  segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

 **/
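/* A worked instance of the formula above, using the hypothetical two-segment
 * layout from the note: if the first segment's page is mapped at
 * phdr0_load_address = 0xa0030000, then
 *
 *     load_bias = 0xa0030000 - PAGE_START(0x30000)
 *               = 0xa0030000 - 0x30000
 *               = 0xa0000000
 *
 * and every p_vaddr in the file maps to p_vaddr + 0xa0000000, reproducing
 * the 0xa0030000...0xa0034000 and 0xa0040000...0xa0048000 ranges shown
 * above.
 */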

#define MAYBE_MAP_FLAG(x, from, to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)            (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                      MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                      MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

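/* For example, a text segment with p_flags == (PF_R | PF_X) expands to
 * PFLAGS_TO_PROT(PF_R | PF_X) == (PROT_READ | PROT_EXEC), and a data
 * segment with (PF_R | PF_W) to (PROT_READ | PROT_WRITE).
 */
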
ElfReader::ElfReader(const char* name, int fd)
    : name_(name), fd_(fd),
      phdr_num_(0), phdr_mmap_(NULL), phdr_table_(NULL), phdr_size_(0),
      load_start_(NULL), load_size_(0), load_bias_(0),
      loaded_phdr_(NULL) {
}

ElfReader::~ElfReader() {
  if (fd_ != -1) {
    close(fd_);
  }
  if (phdr_mmap_ != NULL) {
    munmap(phdr_mmap_, phdr_size_);
  }
}

bool ElfReader::Load() {
  return ReadElfHeader() &&
         VerifyElfHeader() &&
         ReadProgramHeader() &&
         ReserveAddressSpace() &&
         LoadSegments() &&
         FindPhdr();
}

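/* A minimal sketch of how a caller drives this pipeline (hypothetical code;
 * the real call site in linker.cpp also does soinfo bookkeeping, and the
 * accessors are assumed to be the ones declared in linker_phdr.h):
 *
 *     int fd = TEMP_FAILURE_RETRY(open(path, O_RDONLY));
 *     ElfReader elf_reader(path, fd);  // the destructor closes fd
 *     if (elf_reader.Load()) {
 *       // elf_reader.load_start(), load_size() and load_bias() now
 *       // describe the fully mapped image.
 *     }
 */
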
bool ElfReader::ReadElfHeader() {
  ssize_t rc = TEMP_FAILURE_RETRY(read(fd_, &header_, sizeof(header_)));
  if (rc < 0) {
    DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
    return false;
  }
  if (rc != sizeof(header_)) {
    DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_,
           static_cast<size_t>(rc));
    return false;
  }
  return true;
}

bool ElfReader::VerifyElfHeader() {
  if (header_.e_ident[EI_MAG0] != ELFMAG0 ||
      header_.e_ident[EI_MAG1] != ELFMAG1 ||
      header_.e_ident[EI_MAG2] != ELFMAG2 ||
      header_.e_ident[EI_MAG3] != ELFMAG3) {
    DL_ERR("\"%s\" has bad ELF magic", name_);
    return false;
  }

  // Try to give a clear diagnostic for ELF class mismatches, since they're
  // an easy mistake to make during the 32-bit/64-bit transition period.
  int elf_class = header_.e_ident[EI_CLASS];
#if defined(__LP64__)
  if (elf_class != ELFCLASS64) {
    if (elf_class == ELFCLASS32) {
      DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#else
  if (elf_class != ELFCLASS32) {
    if (elf_class == ELFCLASS64) {
      DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_);
    } else {
      DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
    }
    return false;
  }
#endif

  if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
    DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
    return false;
  }

  if (header_.e_type != ET_DYN) {
    DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
    return false;
  }

  if (header_.e_version != EV_CURRENT) {
    DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
    return false;
  }

  if (header_.e_machine != ELF_TARG_MACH) {
    DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
    return false;
  }

  return true;
}

// Loads the program header table from an ELF file into a read-only,
// file-backed private mmap-ed block.
bool ElfReader::ReadProgramHeader() {
  phdr_num_ = header_.e_phnum;

  // Like the kernel, we only accept program header tables that
  // are smaller than 64KiB.
  if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
    DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
    return false;
  }

  ElfW(Addr) page_min = PAGE_START(header_.e_phoff);
  ElfW(Addr) page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ElfW(Phdr))));
  ElfW(Addr) page_offset = PAGE_OFFSET(header_.e_phoff);

  phdr_size_ = page_max - page_min;

  void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
  if (mmap_result == MAP_FAILED) {
    DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
    return false;
  }

  phdr_mmap_ = mmap_result;
  phdr_table_ = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(mmap_result) + page_offset);
  return true;
}

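/* Worked example of the page arithmetic above (illustrative numbers): a
 * 32-bit ELF file with e_phoff == 52 and e_phnum == 7, where
 * sizeof(ElfW(Phdr)) == 32, gives page_min == 0, page_max ==
 * PAGE_END(52 + 224) == 4096 and page_offset == 52, so a single page is
 * mapped and phdr_table_ points 52 bytes into it.
 */
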
/* Returns the size of the extent of all the possibly non-contiguous
 * loadable segments in an ELF program header table. This corresponds
 * to the page-aligned size in bytes that needs to be reserved in the
 * process' address space. If there are no loadable segments, 0 is
 * returned.
 *
 * If out_min_vaddr or out_max_vaddr are non-NULL, they will be
 * set to the minimum and maximum addresses of pages to be reserved,
 * or 0 if there is nothing to load.
 */
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                ElfW(Addr)* out_min_vaddr,
                                ElfW(Addr)* out_max_vaddr) {
  ElfW(Addr) min_vaddr = UINTPTR_MAX;
  ElfW(Addr) max_vaddr = 0;

  bool found_pt_load = false;
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    found_pt_load = true;

    if (phdr->p_vaddr < min_vaddr) {
      min_vaddr = phdr->p_vaddr;
    }

    if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
      max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }
  }
  if (!found_pt_load) {
    min_vaddr = 0;
  }

  min_vaddr = PAGE_START(min_vaddr);
  max_vaddr = PAGE_END(max_vaddr);

  if (out_min_vaddr != NULL) {
    *out_min_vaddr = min_vaddr;
  }
  if (out_max_vaddr != NULL) {
    *out_max_vaddr = max_vaddr;
  }
  return max_vaddr - min_vaddr;
}

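/* Applied to the two hypothetical segments from the technical note above,
 * this returns PAGE_END(0x48000) - PAGE_START(0x30000) == 0x18000 bytes,
 * with *out_min_vaddr == 0x30000 and *out_max_vaddr == 0x48000.
 */
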
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace() {
  ElfW(Addr) min_vaddr;
  load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
  if (load_size_ == 0) {
    DL_ERR("\"%s\" has no loadable segments", name_);
    return false;
  }

  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  void* start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
  if (start == MAP_FAILED) {
    DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_);
    return false;
  }

  load_start_ = start;
  load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
  return true;
}

bool ElfReader::LoadSegments() {
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end   = seg_start + phdr->p_memsz;

    ElfW(Addr) seg_page_start = PAGE_START(seg_start);
    ElfW(Addr) seg_page_end   = PAGE_END(seg_end);

    ElfW(Addr) seg_file_end   = seg_start + phdr->p_filesz;

    // File offsets.
    ElfW(Addr) file_start = phdr->p_offset;
    ElfW(Addr) file_end   = file_start + phdr->p_filesz;

    ElfW(Addr) file_page_start = PAGE_START(file_start);
    ElfW(Addr) file_length = file_end - file_page_start;

    if (file_length != 0) {
      void* seg_addr = mmap((void*)seg_page_start,
                            file_length,
                            PFLAGS_TO_PROT(phdr->p_flags),
                            MAP_FIXED|MAP_PRIVATE,
                            fd_,
                            file_page_start);
      if (seg_addr == MAP_FAILED) {
        DL_ERR("couldn't map \"%s\" segment %zd: %s", name_, i, strerror(errno));
        return false;
      }
    }

    // If the segment is writable and does not end on a page boundary,
    // zero-fill it up to the page limit.
    if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
      memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
    }

    seg_file_end = PAGE_END(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      void* zeromap = mmap((void*)seg_file_end,
                           seg_page_end - seg_file_end,
                           PFLAGS_TO_PROT(phdr->p_flags),
                           MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                           -1,
                           0);
      if (zeromap == MAP_FAILED) {
        DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }
    }
  }
  return true;
}

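/* Tracing the note's second hypothetical segment (filesz:0x2000, memsz:0x8000)
 * through this loop, with its pages at 0xa0040000: the file-backed mapping
 * covers 0xa0040000..0xa0042000; since seg_file_end (0xa0042000) is already
 * page-aligned, no in-place zeroing is needed; and the anonymous mapping
 * supplies the remaining 0xa0042000..0xa0048000 as zero-filled .bss pages.
 */
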
/* Used internally. Sets the protection bits of all loaded segments,
 * with optional extra flags (in practice, PROT_WRITE). Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments.
 */
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                     ElfW(Addr) load_bias, int extra_prot_flags) {
  const ElfW(Phdr)* phdr = phdr_table;
  const ElfW(Phdr)* phdr_limit = phdr + phdr_count;

  for (; phdr < phdr_limit; phdr++) {
    // Segments that are writable by design (PF_W) keep PROT_WRITE either
    // way, so changing their protection here would be a no-op: skip them.
    if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
      continue;
    }

    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect((void*)seg_page_start,
                       seg_page_end - seg_page_start,
                       PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}

/* Used internally by phdr_table_protect_gnu_relro.
 */
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                          ElfW(Addr) load_bias, int prot_flags) {
  const ElfW(Phdr)* phdr_limit = phdr_table + phdr_count;

  for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    // Tricky: what happens when the relro segment does not start
    // or end at page boundaries? We're going to be over-protective
    // here and mark every page touched by the segment as read-only.

    // This seems to match Ian Lance Taylor's description of the
    // feature at http://www.airs.com/blog/archives/189.

    //    Extract:
    //       Note that the current dynamic linker code will only work
    //       correctly if the PT_GNU_RELRO segment starts on a page
    //       boundary. This is because the dynamic linker rounds the
    //       p_vaddr field down to the previous page boundary. If
    //       there is anything on the page which should not be read-only,
    //       the program is likely to fail at runtime. So in effect the
    //       linker must only emit a PT_GNU_RELRO segment if it ensures
    //       that it starts on a page boundary.
    ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
    ElfW(Addr) seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

    int ret = mprotect((void*)seg_page_start,
                       seg_page_end - seg_page_start,
                       prot_flags);
    if (ret < 0) {
      return -1;
    }
  }
  return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * The areas typically covered are .got and .data.rel.ro; these are
 * read-only from the program's point of view, but contain absolute
 * addresses that need to be relocated before use.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
  return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}

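/* A sketch of the call sequence around relocation implied by the comments
 * above (hypothetical driver code; the real sequencing lives in linker.cpp):
 *
 *     phdr_table_unprotect_segments(phdr, count, bias);  // make segments writable
 *     // ... apply all relocations here ...
 *     phdr_table_protect_segments(phdr, count, bias);    // restore original protections
 *     phdr_table_protect_gnu_relro(phdr, count, bias);   // then lock down relro pages
 */
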
#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                             ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, unsigned* arm_exidx_count) {
  const ElfW(Phdr)* phdr_limit = phdr_table + phdr_count;

  for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
    // Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes.
    *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
    return 0;
  }
  *arm_exidx = NULL;
  *arm_exidx_count = 0;
  return -1;
}
#endif

/* Locate the ELF file's .dynamic section in memory. If it is missing,
 * *dynamic is set to NULL.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in table
 *   load_bias   -> load bias
 * Output:
 *   dynamic       -> address of table in memory (NULL on failure).
 *   dynamic_count -> number of items in table (0 on failure).
 *   dynamic_flags -> protection flags for section (unset on failure)
 * Return:
 *   void
 */
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                                    ElfW(Addr) load_bias,
                                    ElfW(Dyn)** dynamic, size_t* dynamic_count, ElfW(Word)* dynamic_flags) {
  const ElfW(Phdr)* phdr_limit = phdr_table + phdr_count;

  for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_DYNAMIC) {
      continue;
    }

    *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr->p_vaddr);
    if (dynamic_count) {
      // p_memsz is in bytes; convert it to a count of ElfW(Dyn) entries
      // (16 bytes each on 64-bit targets, 8 bytes on 32-bit).
      *dynamic_count = phdr->p_memsz / sizeof(ElfW(Dyn));
    }
    if (dynamic_flags) {
      *dynamic_flags = phdr->p_flags;
    }
    return;
  }
  *dynamic = NULL;
  if (dynamic_count) {
    *dynamic_count = 0;
  }
}

// Returns the address of the program header table as it appears in the loaded
// segments in memory. This is in contrast with 'phdr_table_' which
// is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;

  // If there is a PT_PHDR, use it directly.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_PHDR) {
      return CheckPhdr(load_bias_ + phdr->p_vaddr);
    }
  }

  // Otherwise, check the first loadable segment. If its file offset
  // is 0, it starts with the ELF header, and we can trivially find the
  // loaded program header from it.
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type == PT_LOAD) {
      if (phdr->p_offset == 0) {
        ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
        const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr);
        ElfW(Addr) offset = ehdr->e_phoff;
        return CheckPhdr(reinterpret_cast<ElfW(Addr)>(ehdr) + offset);
      }
      break;
    }
  }

  DL_ERR("can't find loaded phdr for \"%s\"", name_);
  return false;
}

// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
  const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
  ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
  for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
    if (phdr->p_type != PT_LOAD) {
      continue;
    }
    ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
    ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
    if (seg_start <= loaded && loaded_end <= seg_end) {
      loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
      return true;
    }
  }
  DL_ERR("\"%s\" loaded phdr %p not in loadable segment", name_, reinterpret_cast<void*>(loaded));
  return false;
}