/* plt.c — revision d2fc09dccfc18680209a918dc8cbcc1f75e41118 */
#include <gelf.h>
#include <sys/ptrace.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <assert.h>
#include <string.h>

#include "proc.h"
#include "common.h"
#include "library.h"
#include "breakpoint.h"
#include "linux-gnu/trace.h"

/* There are two PLT types on 32-bit PPC: old-style BSS PLT, and
 * new-style "secure" PLT.  We can tell one from the other by the
 * flags on the .plt section.  If it's +X (executable), it's BSS PLT,
 * otherwise it's secure.
 *
 * BSS PLT works the same way as most architectures: the .plt section
 * contains trampolines and we put breakpoints on those.  If not
 * prelinked, .plt contains zeroes, and the dynamic linker fills in
 * the initial set of trampolines, which means that we need to delay
 * enabling breakpoints until after the binary entry point is hit.
 * Additionally, after the first call, the dynamic linker updates
 * .plt with a branch to the resolved address.  That means that on
 * the first hit, we must do something similar to the PPC64 gambit
 * described below.
 *
 * With secure PLT, the .plt section doesn't contain instructions but
 * addresses.  The real PLT table is stored in .text.  Addresses of
 * those PLT entries can be computed, and apart from the fact that
 * they are in .text, they are ordinary PLT entries.
 *
 * 64-bit PPC is more involved.  The program linker creates for each
 * library call a _stub_ symbol named xxxxxxxx.plt_call.<callee>
 * (where xxxxxxxx is a hexadecimal number).  That stub does the call
 * dispatch: it loads the address of the function to call from the
 * .plt section, and branches.  PLT entries themselves are
 * essentially a curried call to the resolver.  When the symbol is
 * resolved, the resolver updates the value stored in .plt, and the
 * next time around, the stub calls the library function directly.
 * So we make at most one trip (none if the binary is prelinked)
 * through each PLT entry, and correspondingly the PLT entry is
 * useless as a breakpoint site.
 *
 * Note the three confusing terms: stubs (which play the role of PLT
 * entries), PLT entries, and the .plt section.
 *
 * We first check the symbol tables to see if we happen to have stub
 * symbols available.  If yes, we just put breakpoints on those, and
 * treat them as usual breakpoints.  The only tricky part is
 * realizing that there can be more than one breakpoint per symbol.
 *
 * The case where we don't have the stub symbols available is harder.
 * The following scheme uses two kinds of PLT breakpoints: unresolved
 * and resolved (to some address).  When the process starts (or when
 * we attach), we distribute unresolved PLT breakpoints to the PLT
 * entries (not stubs).  Then we look in .plt, and for each entry
 * whose value differs from the corresponding PLT entry address, we
 * assume it was already resolved, and convert the breakpoint to
 * resolved.  We also rewrite the resolved value in .plt back to the
 * PLT address.
 *
 * When a resolved PLT breakpoint is hit (which happens because we
 * rewrite .plt with the original unresolved addresses), we move the
 * instruction pointer to the corresponding resolved address and
 * continue the process as if nothing happened.
 *
 * When an unresolved PLT entry is called for the first time, we need
 * to catch the new value that the resolver will write to the .plt
 * slot.  We also need to prevent another thread from racing through
 * and taking the branch without ltrace noticing.  So when an
 * unresolved PLT entry hits, we have to stop all threads.  We then
 * single-step through the resolver until the .plt slot changes.
 * When it does, we treat it the same way as above: convert the PLT
 * breakpoint to resolved, and rewrite the .plt value back to the PLT
 * address.  We then start all threads again.
 *
 * As an optimization, we remember the address where the resolution
 * took place, and put a breakpoint there.  The next time around
 * (when the next PLT entry is to be resolved), instead of
 * single-stepping through half the dynamic linker, we just let the
 * thread run and hit this breakpoint.  When it hits, we know the PLT
 * entry was resolved.
 *
 * N.B. It's tempting to try to emulate the instruction that updates
 * .plt.  We would compute the resolved address, and instead of
 * letting the dynamic linker put it in .plt, we would resolve the
 * breakpoint to that address.  This way we wouldn't need to stop
 * other threads.  However, that instruction may turn out to be a
 * sync, and in general may be any instruction between the actual
 * write and the following sync.  XXX TODO that means that we need to
 * put the post-enable breakpoint at the following sync, not at the
 * instruction itself (unless it's a sync already).
 *
 * XXX TODO If we have a hardware watchpoint, we might put a read
 * watch on the .plt slot, and discover the offenders this way.  I
 * don't know the details, but I assume at most a handful (like, one
 * or two, if available at all) of addresses may be watched at a
 * time, and thus this would be used as an amendment of the above
 * rather than a full-on solution to PLT tracing on PPC.
 */

#define PPC_PLT_STUB_SIZE 16
#define PPC64_PLT_STUB_SIZE 8 //xxx

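/* Return non-zero when ltrace itself is built for 64-bit PowerPC,
 * i.e. when the words returned by ptrace are 8 bytes wide.  */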
static inline int
host_powerpc64()
{
#ifdef __powerpc64__
	return 1;
#else
	return 0;
#endif
}

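/* Read a 32-bit word of the tracee's memory at ADDR.  On a 64-bit
 * host, PTRACE_PEEKTEXT returns a full 8-byte word with the
 * requested bytes in its high (big-endian) half, hence the shift.  */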
int
read_target_4(struct Process *proc, target_address_t addr, uint32_t *lp)
{
	unsigned long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (l == -1UL && errno)
		return -1;
	if (host_powerpc64())
		l >>= 32;
	*lp = l;
	return 0;
}

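/* Read a 64-bit word of the tracee's memory at ADDR.  A 64-bit host
 * gets it with a single peek; a 32-bit host combines two 4-byte
 * peeks, most significant word first.  */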
static int
read_target_8(struct Process *proc, target_address_t addr, uint64_t *lp)
{
	unsigned long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (l == -1UL && errno)
		return -1;
	if (host_powerpc64()) {
		*lp = l;
	} else {
		unsigned long l2 = ptrace(PTRACE_PEEKTEXT, proc->pid,
					  addr + 4, 0);
		if (l2 == -1UL && errno)
			return -1;
		*lp = ((uint64_t)l << 32) | l2;
	}
	return 0;
}

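/* Read a target-native long: 4 bytes when the tracee is 32-bit PPC,
 * 8 bytes when it is PPC64.  */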
int
read_target_long(struct Process *proc, target_address_t addr, uint64_t *lp)
{
	if (proc->e_machine == EM_PPC) {
		uint32_t w;
		int ret = read_target_4(proc, addr, &w);
		if (ret >= 0)
			*lp = (uint64_t)w;
		return ret;
	} else {
		return read_target_8(proc, addr, lp);
	}
}

static enum callback_status
reenable_breakpoint(struct Process *proc, struct breakpoint *bp, void *data)
{
	/* We don't need to re-enable non-PLT breakpoints and
	 * breakpoints that are not PPC32 BSS unprelinked.  */
	if (bp->libsym == NULL
	    || bp->libsym->plt_type == LS_TOPLT_NONE
	    || bp->libsym->lib->arch.bss_plt_prelinked != 0)
		return CBS_CONT;

	debug(DEBUG_PROCESS, "pid=%d reenable_breakpoint %s",
	      proc->pid, breakpoint_name(bp));

	assert(proc->e_machine == EM_PPC);
	uint64_t l;
	if (read_target_8(proc, bp->addr, &l) < 0) {
		error(0, errno, "couldn't read PLT value for %s(%p)",
		      breakpoint_name(bp), bp->addr);
		return CBS_CONT;
	}
	bp->libsym->arch.plt_slot_addr = (GElf_Addr)bp->addr;
	bp->libsym->arch.resolved_value = l;

	/* Re-enable the breakpoint that was overwritten by the
	 * dynamic linker.  */
	enable_breakpoint(proc, bp);

	return CBS_CONT;
}

void
arch_dynlink_done(struct Process *proc)
{
	/* On PPC32, .plt of objects that use BSS PLT are overwritten
	 * by the dynamic linker (unless that object was prelinked).
	 * We need to re-enable breakpoints in those objects.  */
	proc_each_breakpoint(proc, NULL, reenable_breakpoint, NULL);
}

GElf_Addr
arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela *rela)
{
	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC_PLT_STUB_SIZE * ndx;

	} else if (lte->ehdr.e_machine == EM_PPC) {
		return rela->r_offset;

	} else {
		/* If we get here, we don't have stub symbols.  In
		 * that case we put breakpoints on PLT entries, the
		 * same as the PPC32 secure PLT case does.  */
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC64_PLT_STUB_SIZE * ndx;
	}
}

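/* On PPC64, a function symbol's value points to a function
 * descriptor in .opd whose first doubleword is the actual code
 * address, so read that from the process image.  On PPC32 the
 * address needs no translation.  */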
int
arch_translate_address(struct Process *proc,
		       target_address_t addr, target_address_t *ret)
{
	if (proc->e_machine == EM_PPC64) {
		assert(host_powerpc64());
		long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
		if (l == -1 && errno) {
			error(0, errno, ".opd translation of %p", addr);
			return -1;
		}
		*ret = (target_address_t)l;
		return 0;
	}

	*ret = addr;
	return 0;
}

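/* Symbol addresses are computed when the symbols are created, so
 * simply return the stored enter address.  */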
void *
sym2addr(struct Process *proc, struct library_symbol *sym)
{
	return sym->enter_addr;
}

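/* Determine the VMA of the glink area (the actual PLT stubs) of a
 * PPC32 secure-PLT object.  Prefer the second word of the PPCGOT
 * area in .got; if that is missing or zero, fall back to the first
 * word of .plt.  */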
static GElf_Addr
get_glink_vma(struct ltelf *lte, GElf_Addr ppcgot, Elf_Data *plt_data)
{
	Elf_Scn *ppcgot_sec = NULL;
	GElf_Shdr ppcgot_shdr;
	if (ppcgot != 0
	    && elf_get_section_covering(lte, ppcgot,
					&ppcgot_sec, &ppcgot_shdr) < 0)
		error(0, 0, "DT_PPC_GOT=%#"PRIx64", but no such section found",
		      ppcgot);

	if (ppcgot_sec != NULL) {
		Elf_Data *data = elf_loaddata(ppcgot_sec, &ppcgot_shdr);
		if (data == NULL || data->d_size < 8) {
			error(0, 0, "couldn't read GOT data");
		} else {
			// where PPCGOT begins in .got
			size_t offset = ppcgot - ppcgot_shdr.sh_addr;
			assert(offset % 4 == 0);
			uint32_t glink_vma;
			if (elf_read_u32(data, offset + 4, &glink_vma) < 0) {
				error(0, 0, "couldn't read glink VMA address"
				      " at %zd@GOT", offset);
				return 0;
			}
			if (glink_vma != 0) {
				debug(1, "PPC GOT glink_vma address: %#" PRIx32,
				      glink_vma);
				return (GElf_Addr)glink_vma;
			}
		}
	}

	if (plt_data != NULL) {
		uint32_t glink_vma;
		if (elf_read_u32(plt_data, 0, &glink_vma) < 0) {
			error(0, 0, "couldn't read glink VMA address");
			return 0;
		}
		debug(1, ".plt glink_vma address: %#" PRIx32, glink_vma);
		return (GElf_Addr)glink_vma;
	}

	return 0;
}

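/* Scan the SHT_DYNAMIC section for an entry with the given TAG and
 * store its d_ptr in *VALUEP.  Returns 0 on success, -1 if the
 * section can't be read or the tag is not present.  */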
static int
load_dynamic_entry(struct ltelf *lte, int tag, GElf_Addr *valuep)
{
	Elf_Scn *scn;
	GElf_Shdr shdr;
	if (elf_get_section_type(lte, SHT_DYNAMIC, &scn, &shdr) < 0
	    || scn == NULL) {
	fail:
		error(0, 0, "Couldn't get SHT_DYNAMIC: %s",
		      elf_errmsg(-1));
		return -1;
	}

	Elf_Data *data = elf_loaddata(scn, &shdr);
	if (data == NULL)
		goto fail;

	size_t j;
	for (j = 0; j < shdr.sh_size / shdr.sh_entsize; ++j) {
		GElf_Dyn dyn;
		if (gelf_getdyn(data, j, &dyn) == NULL)
			goto fail;

		if (dyn.d_tag == tag) {
			*valuep = dyn.d_un.d_ptr;
			return 0;
		}
	}

	return -1;
}

static int
load_ppcgot(struct ltelf *lte, GElf_Addr *ppcgotp)
{
	return load_dynamic_entry(lte, DT_PPC_GOT, ppcgotp);
}

static int
load_ppc64_glink(struct ltelf *lte, GElf_Addr *glinkp)
{
	return load_dynamic_entry(lte, DT_PPC64_GLINK, glinkp);
}

static int
nonzero_data(Elf_Data *data)
{
	/* We are not supposed to get here if there's no PLT.  */
	assert(data != NULL);

	unsigned char *buf = data->d_buf;
	if (buf == NULL)
		return 0;

	size_t i;
	for (i = 0; i < data->d_size; ++i)
		if (buf[i] != 0)
			return 1;
	return 0;
}

int
arch_elf_init(struct ltelf *lte, struct library *lib)
{
	lte->arch.secure_plt = !(lte->plt_flags & SHF_EXECINSTR);

	/* For PPC32 BSS, it is important whether the binary was
	 * prelinked.  If .plt section is NODATA, or if it contains
	 * zeroes, then this library is not prelinked, and we need to
	 * delay breakpoints.  */
	if (lte->ehdr.e_machine == EM_PPC && !lte->arch.secure_plt)
		lib->arch.bss_plt_prelinked = nonzero_data(lte->plt_data);
	else
		/* For cases where it's irrelevant, initialize the
		 * value to something conspicuous.  */
		lib->arch.bss_plt_prelinked = -1;

	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		GElf_Addr ppcgot;
		if (load_ppcgot(lte, &ppcgot) < 0) {
			error(0, 0, "couldn't find DT_PPC_GOT");
			return -1;
		}
		GElf_Addr glink_vma = get_glink_vma(lte, ppcgot, lte->plt_data);

		assert(lte->relplt_size % 12 == 0);
		size_t count = lte->relplt_size / 12; // size of RELA entry
		lte->arch.plt_stub_vma = glink_vma
			- (GElf_Addr)count * PPC_PLT_STUB_SIZE;
		debug(1, "stub_vma is %#" PRIx64, lte->arch.plt_stub_vma);

	} else if (lte->ehdr.e_machine == EM_PPC64) {
		GElf_Addr glink_vma;
		if (load_ppc64_glink(lte, &glink_vma) < 0) {
			error(0, 0, "couldn't find DT_PPC64_GLINK");
			return -1;
		}

		/* The first glink stub starts at offset 32.  */
		lte->arch.plt_stub_vma = glink_vma + 32;
	}

	/* On PPC64, look for stub symbols in symbol table.  These are
	 * called: xxxxxxxx.plt_call.callee_name@version+addend.  */
	if (lte->ehdr.e_machine == EM_PPC64
	    && lte->symtab != NULL && lte->strtab != NULL) {

		/* N.B. We can't simply skip the symbols that we fail
		 * to read or malloc.  There may be more than one stub
		 * per symbol name, and if we failed in one but
		 * succeeded in another, the PLT enabling code would
		 * have no way to tell that something is missing.  We
		 * could work around that, of course, but it doesn't
		 * seem worth the trouble.  So if anything fails, we
		 * just pretend that we don't have stub symbols at
		 * all, as if the binary is stripped.  */

		size_t i;
		for (i = 0; i < lte->symtab_count; ++i) {
			GElf_Sym sym;
			if (gelf_getsym(lte->symtab, i, &sym) == NULL) {
				struct library_symbol *sym, *next;
			fail:
				for (sym = lte->arch.stubs; sym != NULL; ) {
					next = sym->next;
					library_symbol_destroy(sym);
					free(sym);
					sym = next;
				}
				lte->arch.stubs = NULL;
				break;
			}

			const char *name = lte->strtab + sym.st_name;

#define STUBN ".plt_call."
			if ((name = strstr(name, STUBN)) == NULL)
				continue;
			name += sizeof(STUBN) - 1;
#undef STUBN

			size_t len;
			const char *ver = strchr(name, '@');
			if (ver != NULL) {
				len = ver - name;

			} else {
				/* If there is "+" at all, check that
				 * the symbol name ends in "+0".  */
				const char *add = strrchr(name, '+');
				if (add != NULL) {
					assert(strcmp(add, "+0") == 0);
					len = add - name;
				} else {
					len = strlen(name);
				}
			}

			char *sym_name = strndup(name, len);
			struct library_symbol *libsym = malloc(sizeof(*libsym));
			if (sym_name == NULL || libsym == NULL) {
			fail2:
				free(sym_name);
				free(libsym);
				goto fail;
			}

			/* XXX The double cast should be removed when
			 * target_address_t becomes integral type.  */
			target_address_t addr = (target_address_t)
				(uintptr_t)sym.st_value + lte->bias;
			if (library_symbol_init(libsym, addr, sym_name, 1,
						LS_TOPLT_EXEC) < 0)
				goto fail2;
			libsym->arch.type = PPC64_PLT_STUB;
			libsym->next = lte->arch.stubs;
			lte->arch.stubs = libsym;
		}
	}

	return 0;
}

static int
read_plt_slot_value(struct Process *proc, GElf_Addr addr, GElf_Addr *valp)
{
	/* On PPC64, we read from .plt, which contains 8 byte
	 * addresses.  On PPC32 we read from .plt, which contains 4
	 * byte instructions, but the PLT is two instructions, and
	 * either can change.  */
	uint64_t l;
	if (read_target_8(proc, (target_address_t)addr, &l) < 0) {
		error(0, errno, "ptrace .plt slot value @%#" PRIx64, addr);
		return -1;
	}

	*valp = (GElf_Addr)l;
	return 0;
}

static int
unresolve_plt_slot(struct Process *proc, GElf_Addr addr, GElf_Addr value)
{
	/* We only modify plt_entry[0], which holds the resolved
	 * address of the routine.  We keep the TOC and environment
	 * pointers intact.  Hence the only adjustment that we need to
	 * do is to IP.  */
	if (ptrace(PTRACE_POKETEXT, proc->pid, addr, value) < 0) {
		error(0, errno, "unresolve .plt slot");
		return -1;
	}
	return 0;
}

static void
mark_as_resolved(struct library_symbol *libsym, GElf_Addr value)
{
	libsym->arch.type = PPC_PLT_RESOLVED;
	libsym->arch.resolved_value = value;
}

enum plt_status
arch_elf_add_plt_entry(struct Process *proc, struct ltelf *lte,
		       const char *a_name, GElf_Rela *rela, size_t ndx,
		       struct library_symbol **ret)
{
	if (lte->ehdr.e_machine == EM_PPC)
		return plt_default;

	/* PPC64.  If we have stubs, we return a chain of breakpoint
	 * sites, one for each stub that corresponds to this PLT
	 * entry.  */
	struct library_symbol *chain = NULL;
	struct library_symbol **symp;
	for (symp = &lte->arch.stubs; *symp != NULL; ) {
		struct library_symbol *sym = *symp;
		if (strcmp(sym->name, a_name) != 0) {
			symp = &(*symp)->next;
			continue;
		}

		/* Re-chain the symbol from stubs to CHAIN.  */
		*symp = sym->next;
		sym->next = chain;
		chain = sym;
	}

	if (chain != NULL) {
		*ret = chain;
		return plt_ok;
	}

	/* We don't have stub symbols.  Find the corresponding .plt
	 * slot, and check whether it contains the corresponding PLT
	 * address (or 0 if the dynamic linker hasn't run yet).
	 * N.B. we don't want to read this from the ELF file, but from
	 * the process image.  That makes a difference if we are
	 * attaching to a running process.  */

	GElf_Addr plt_entry_addr = arch_plt_sym_val(lte, ndx, rela);
	GElf_Addr plt_slot_addr = rela->r_offset;
	assert(plt_slot_addr >= lte->plt_addr
	       && plt_slot_addr < lte->plt_addr + lte->plt_size);

	GElf_Addr plt_slot_value;
	if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0)
		return plt_fail;

	char *name = strdup(a_name);
	struct library_symbol *libsym = malloc(sizeof(*libsym));
	if (name == NULL || libsym == NULL) {
		error(0, errno, "allocation for .plt slot");
	fail:
		free(name);
		free(libsym);
		return plt_fail;
	}

	/* XXX The double cast should be removed when
	 * target_address_t becomes integral type.  */
	if (library_symbol_init(libsym,
				(target_address_t)(uintptr_t)plt_entry_addr,
				name, 1, LS_TOPLT_EXEC) < 0)
		goto fail;
	libsym->arch.plt_slot_addr = plt_slot_addr;

	if (plt_slot_value == plt_entry_addr || plt_slot_value == 0) {
		libsym->arch.type = PPC_PLT_UNRESOLVED;
		libsym->arch.resolved_value = plt_entry_addr;

	} else {
		/* Unresolve the .plt slot.  If the binary was
		 * prelinked, this makes the code invalid, because in
		 * the case of a prelinked binary, the dynamic linker
		 * doesn't update .plt[0] and .plt[1] with addresses
		 * of the resolver.  But we don't care, we will never
		 * need to enter the resolver.  That just means that
		 * we have to un-un-resolve this back before we
		 * detach.  */

		if (unresolve_plt_slot(proc, plt_slot_addr, plt_entry_addr) < 0) {
			library_symbol_destroy(libsym);
			goto fail;
		}
		mark_as_resolved(libsym, plt_slot_value);
	}

	*ret = libsym;
	return plt_ok;
}

void
arch_elf_destroy(struct ltelf *lte)
{
	struct library_symbol *sym;
	for (sym = lte->arch.stubs; sym != NULL; ) {
		struct library_symbol *next = sym->next;
		library_symbol_destroy(sym);
		free(sym);
		sym = next;
	}
}

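/* Hit handler for the breakpoint planted just past the instruction
 * with which the dynamic linker updates a .plt slot.  When it fires,
 * the slot was just resolved: read the new value, write the original
 * PLT address back (on PPC64), and mark the symbol as resolved.  */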
static void
dl_plt_update_bp_on_hit(struct breakpoint *bp, struct Process *proc)
{
	debug(DEBUG_PROCESS, "pid=%d dl_plt_update_bp_on_hit %s(%p)",
	      proc->pid, breakpoint_name(bp), bp->addr);
	struct process_stopping_handler *self = proc->arch.handler;
	assert(self != NULL);

	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return;

	/* On PPC64, we rewrite the slot value.  */
	if (proc->e_machine == EM_PPC64)
		unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
				   libsym->arch.resolved_value);
	/* We mark the breakpoint as resolved on both arches.  */
	mark_as_resolved(libsym, value);

	/* cb_on_all_stopped looks if HANDLER is set to NULL as a way
	 * to check that this was run.  It's an error if it
	 * wasn't.  */
	breakpoint_turn_off(bp, proc);
	proc->arch.handler = NULL;
}

static void
cb_on_all_stopped(struct process_stopping_handler *self)
{
	/* Put that in for dl_plt_update_bp_on_hit to see.  */
	assert(self->task_enabling_breakpoint->arch.handler == NULL);
	self->task_enabling_breakpoint->arch.handler = self;

	linux_ptrace_disable_and_continue(self);
}

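/* Decide whether to keep single-stepping through the resolver.  We
 * keep going until the .plt slot changes.  Once it does, we write
 * the original PLT address back, try to plant the dl_plt_update
 * breakpoint one instruction past the update, and mark the symbol
 * as resolved.  */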
static enum callback_status
cb_keep_stepping_p(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;
	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return CBS_FAIL;

	/* In UNRESOLVED state, the RESOLVED_VALUE in fact contains
	 * the PLT entry value.  */
	if (value == libsym->arch.resolved_value)
		return CBS_CONT;

	debug(DEBUG_PROCESS, "pid=%d PLT got resolved to value %#"PRIx64,
	      proc->pid, value);

	/* The .plt slot got resolved!  We can migrate the breakpoint
	 * to RESOLVED and stop single-stepping.  */
	if (unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
			       libsym->arch.resolved_value) < 0)
		return CBS_FAIL;

	/* Install breakpoint to the address where the change takes
	 * place.  If we fail, then that just means that we'll have to
	 * singlestep the next time around as well.  */
	struct Process *leader = proc->leader;
	if (leader == NULL || leader->arch.dl_plt_update_bp != NULL)
		goto done;

	/* We need to install to the next instruction.  ADDR points to
	 * a store instruction, so moving the breakpoint one
	 * instruction forward is safe.  */
	target_address_t addr = get_instruction_pointer(proc) + 4;
	leader->arch.dl_plt_update_bp = insert_breakpoint(proc, addr, NULL);
	if (leader->arch.dl_plt_update_bp == NULL)
		goto done;

	/* Turn it off for now.  We will turn it on again when we hit
	 * the PLT entry that needs this.  */
	breakpoint_turn_off(leader->arch.dl_plt_update_bp, proc);

	if (leader->arch.dl_plt_update_bp != NULL) {
		static struct bp_callbacks dl_plt_update_cbs = {
			.on_hit = dl_plt_update_bp_on_hit,
		};
		leader->arch.dl_plt_update_bp->cbs = &dl_plt_update_cbs;
	}

done:
	mark_as_resolved(libsym, value);

	return CBS_STOP;
}

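/* The on_continue callback for PLT breakpoints.  Unresolved entries
 * (and unprelinked PPC32 BSS entries) stop all threads and either
 * rely on the dl_plt_update breakpoint or single-step through the
 * resolver.  Resolved PPC64 entries just redirect the IP to the
 * resolved address; resolved PPC32 entries continue normally.  */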
static void
ppc_plt_bp_continue(struct breakpoint *bp, struct Process *proc)
{
	switch (bp->libsym->arch.type) {
		target_address_t rv;
		struct Process *leader;
		void (*on_all_stopped)(struct process_stopping_handler *);
		enum callback_status (*keep_stepping_p)
			(struct process_stopping_handler *);

	case PPC_DEFAULT:
		assert(proc->e_machine == EM_PPC);
		assert(bp->libsym != NULL);
		assert(bp->libsym->lib->arch.bss_plt_prelinked == 0);
		/* fall-through */

	case PPC_PLT_UNRESOLVED:
		on_all_stopped = NULL;
		keep_stepping_p = NULL;
		leader = proc->leader;

		if (leader != NULL && leader->arch.dl_plt_update_bp != NULL
		    && breakpoint_turn_on(leader->arch.dl_plt_update_bp,
					  proc) >= 0)
			on_all_stopped = cb_on_all_stopped;
		else
			keep_stepping_p = cb_keep_stepping_p;

		if (process_install_stopping_handler
		    (proc, bp, on_all_stopped, keep_stepping_p, NULL) < 0) {
			error(0, 0, "ppc_plt_bp_continue: couldn't install"
			      " event handler");
			continue_after_breakpoint(proc, bp);
		}
		return;

	case PPC_PLT_RESOLVED:
		if (proc->e_machine == EM_PPC) {
			continue_after_breakpoint(proc, bp);
			return;
		}

		/* XXX The double cast should be removed when
		 * target_address_t becomes integral type.  */
		rv = (target_address_t)
			(uintptr_t)bp->libsym->arch.resolved_value;
		set_instruction_pointer(proc, rv);
		continue_process(proc->pid);
		return;

	case PPC64_PLT_STUB:
		/* These should never hit here.  */
		break;
	}

	assert(bp->libsym->arch.type != bp->libsym->arch.type);
	abort();
}

void
arch_library_init(struct library *lib)
{
}

void
arch_library_destroy(struct library *lib)
{
}

void
arch_library_clone(struct library *retp, struct library *lib)
{
}

int
arch_library_symbol_init(struct library_symbol *libsym)
{
	/* We set type explicitly in the code above, where we have the
	 * necessary context.  This is for calls from ltrace-elf.c and
	 * such.  */
	libsym->arch.type = PPC_DEFAULT;
	return 0;
}

void
arch_library_symbol_destroy(struct library_symbol *libsym)
{
}

int
arch_library_symbol_clone(struct library_symbol *retp,
			  struct library_symbol *libsym)
{
	retp->arch = libsym->arch;
	return 0;
}

/* For some symbol types, we need to set up custom callbacks.  XXX we
 * don't need PROC here, we can store the data in BP if it is of
 * interest to us.  */
int
arch_breakpoint_init(struct Process *proc, struct breakpoint *bp)
{
	/* Artificial and entry-point breakpoints are plain.  */
	if (bp->libsym == NULL || bp->libsym->plt_type != LS_TOPLT_EXEC)
		return 0;

	/* On PPC, secure PLT and prelinked BSS PLT are plain.  */
	if (proc->e_machine == EM_PPC
	    && bp->libsym->lib->arch.bss_plt_prelinked != 0)
		return 0;

	/* On PPC64, stub PLT breakpoints are plain.  */
	if (proc->e_machine == EM_PPC64
	    && bp->libsym->arch.type == PPC64_PLT_STUB)
		return 0;

	static struct bp_callbacks cbs = {
		.on_continue = ppc_plt_bp_continue,
	};
	breakpoint_set_callbacks(bp, &cbs);
	return 0;
}

void
arch_breakpoint_destroy(struct breakpoint *bp)
{
}

int
arch_breakpoint_clone(struct breakpoint *retp, struct breakpoint *sbp)
{
	retp->arch = sbp->arch;
	return 0;
}

int
arch_process_init(struct Process *proc)
{
	proc->arch.dl_plt_update_bp = NULL;
	proc->arch.handler = NULL;
	return 0;
}

void
arch_process_destroy(struct Process *proc)
{
}

int
arch_process_clone(struct Process *retp, struct Process *proc)
{
	retp->arch = proc->arch;
	return 0;
}

int
arch_process_exec(struct Process *proc)
{
	return arch_process_init(proc);
}