/* plt.c, revision d95733284377c0b186ba0c81a1158edc2b913e45 */
#include <gelf.h>
#include <sys/ptrace.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "proc.h"
#include "common.h"
#include "library.h"
#include "breakpoint.h"
#include "linux-gnu/trace.h"

/* There are two PLT types on 32-bit PPC: the old-style BSS PLT, and
 * the new-style "secure" PLT.  We can tell one from the other by the
 * flags on the .plt section.  If it's +X (executable), it's BSS PLT,
 * otherwise it's secure.
 *
 * BSS PLT works the same way as on most architectures: the .plt
 * section contains trampolines and we put breakpoints on those.  With
 * secure PLT, the .plt section doesn't contain instructions but
 * addresses.  The real PLT table is stored in .text.  Addresses of
 * those PLT entries can be computed, and in fact that's what the
 * glink handling below does.
 *
 * If not prelinked, BSS PLT entries in the .plt section contain
 * zeroes that are overwritten by the dynamic linker during start-up.
 * For that reason, ltrace realizes those breakpoints only after
 * _start is hit.
 *
 * 64-bit PPC is more involved.  The program linker creates for each
 * library call a _stub_ symbol named xxxxxxxx.plt_call.<callee>
 * (where xxxxxxxx is a hexadecimal number).  That stub does the call
 * dispatch: it loads the address of the function to call from the
 * .plt section, and branches.  PLT entries themselves are essentially
 * a curried call to the resolver.  When the symbol is resolved, the
 * resolver updates the value stored in .plt, and the next time
 * around, the stub calls the library function directly.  So we make
 * at most one trip (none if the binary is prelinked) through each PLT
 * entry, and correspondingly a PLT entry is useless as a breakpoint
 * site.
 *
 * Note the three confusing terms: stubs (that play the role of PLT
 * entries), PLT entries, .plt section.
 *
 * We first check symbol tables and see if we happen to have stub
 * symbols available.  If so, we just put breakpoints on those, and
 * treat them as ordinary breakpoints.  The only tricky part is
 * realizing that there can be more than one breakpoint per symbol.
 *
 * The case where we don't have the stub symbols available is harder.
 * The following scheme uses two kinds of PLT breakpoints: unresolved
 * and resolved (to some address).  When the process starts (or when
 * we attach), we distribute unresolved PLT breakpoints to the PLT
 * entries (not stubs).  Then we look in .plt, and for each entry
 * whose value differs from the corresponding PLT entry address, we
 * assume it was already resolved, and convert the breakpoint to
 * resolved.  We also rewrite the resolved value in .plt back to the
 * PLT address.
 *
 * When a resolved breakpoint is hit (which happens because we rewrite
 * .plt with the original unresolved addresses), we move the
 * instruction pointer to the resolved address and continue the
 * process as if nothing happened.
 *
 * When an unresolved PLT entry is called for the first time, we need
 * to catch the new value that the resolver will write to the .plt
 * slot.  We also need to prevent another thread from racing through
 * and taking the branch without ltrace noticing.  So when an
 * unresolved PLT entry hits, we have to stop all threads.  We then
 * single-step through the resolver, until the .plt slot changes.
 * When it does, we treat it the same way as above: convert the PLT
 * breakpoint to resolved, and rewrite the .plt value back to the PLT
 * address.  We then start all threads again.
 *
 * As an optimization, we remember the address where the resolution
 * took place, and put a breakpoint there.  The next time around (when
 * the next PLT entry is to be resolved), instead of single-stepping
 * through half the dynamic linker, we just let the thread run and hit
 * this breakpoint.  When it hits, we know the PLT entry was resolved.
 *
 * XXX TODO As an additional optimization, after the above is done, we
 * might emulate the instruction that updates .plt.  We would compute
 * the resolved address, and instead of letting the dynamic linker put
 * it in .plt, we would resolve the breakpoint to that address.  This
 * way we wouldn't need to stop other threads.  Otherwise there's no
 * way around that.  Unless we know where the stubs are, we don't have
 * a way to catch a thread that would use the window of opportunity
 * between updating .plt and notifying ltrace that it happened.
 *
 * XXX TODO If we have hardware watchpoints, we might put a read watch
 * on the .plt slot, and discover the offenders this way.  I don't
 * know the details, but I assume at most a handful (like, one or two,
 * if available at all) addresses may be watched at a time, and thus
 * this would be used as an amendment to the above rather than as a
 * full-on solution to PLT tracing on PPC.
 */
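
/* To make the PPC64 terminology above concrete, the dispatch path
 * looks roughly like this.  The addresses, offsets and exact stub
 * instruction sequence are illustrative only and vary with the
 * toolchain:
 *
 *   caller:          bl <xxxxxxxx.plt_call.foo>
 *   stub in .text:   std r2,40(r1)        # save caller's TOC pointer
 *                    ld r11,<offset>(r2)  # load target from .plt slot
 *                    mtctr r11
 *                    bctr                 # branch to it
 *   .plt slot:       initially the address of the corresponding PLT
 *                    entry (glink code that enters the resolver), or
 *                    zero before the dynamic linker has run; after
 *                    resolution, the address of foo itself.
 */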

#define PPC_PLT_STUB_SIZE 16
#define PPC64_PLT_STUB_SIZE 8 /* XXX */

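/* Return non-zero iff ltrace itself was compiled for 64-bit PowerPC.  */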
static inline int
host_powerpc64(void)
{
#ifdef __powerpc64__
	return 1;
#else
	return 0;
#endif
}

static enum callback_status
reenable_breakpoint(struct Process *proc, struct breakpoint *bp, void *data)
{
	/* We don't need to re-enable non-PLT breakpoints, or PLT
	 * breakpoints in objects other than non-prelinked PPC32 BSS
	 * PLT ones.  */
	if (bp->libsym == NULL
	    || bp->libsym->plt_type == LS_TOPLT_NONE
	    || bp->libsym->lib->arch.bss_plt_prelinked != 0)
		return CBS_CONT;

	debug(DEBUG_PROCESS, "pid=%d reenable_breakpoint %s",
	      proc->pid, breakpoint_name(bp));

	/* Re-enable the breakpoint that was overwritten by the
	 * dynamic linker.  XXX unfortunately it's overwritten
	 * again after the first call :-/  */
	enable_breakpoint(proc, bp);

	return CBS_CONT;
}

void
arch_dynlink_done(struct Process *proc)
{
	/* On PPC32, the .plt sections of objects that use BSS PLT are
	 * overwritten by the dynamic linker (unless the object was
	 * prelinked).  We need to re-enable breakpoints in those
	 * objects.  */
	proc_each_breakpoint(proc, NULL, reenable_breakpoint, NULL);
}

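/* Compute the address at which we put the breakpoint for the PLT
 * entry with index NDX: a stub address for PPC32 secure PLT and for
 * PPC64 without stub symbols, or the .plt trampoline itself for PPC32
 * BSS PLT.  */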
GElf_Addr
arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela *rela)
{
	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC_PLT_STUB_SIZE * ndx;

	} else if (lte->ehdr.e_machine == EM_PPC) {
		return rela->r_offset;

	} else {
		/* If we get here, we don't have stub symbols.  In
		 * that case we put breakpoints on PLT entries the
		 * same as the PPC32 secure PLT case does.  */
		assert(lte->arch.plt_stub_vma != 0);
		return lte->arch.plt_stub_vma + PPC64_PLT_STUB_SIZE * ndx;
	}
}

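/* On PPC64, function symbols point at .opd function descriptors
 * rather than at code.  Read the first doubleword of the descriptor
 * from the process image to obtain the actual entry point; on PPC32,
 * ADDR is returned unchanged.  */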
int
arch_translate_address(struct Process *proc,
		       target_address_t addr, target_address_t *ret)
{
	if (proc->e_machine == EM_PPC64) {
		assert(host_powerpc64());
		errno = 0;
		long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
		if (l == -1 && errno != 0) {
			error(0, errno, ".opd translation of %p", addr);
			return -1;
		}
		*ret = (target_address_t)l;
		return 0;
	}

	*ret = addr;
	return 0;
}

void *
sym2addr(struct Process *proc, struct library_symbol *sym)
{
	return sym->enter_addr;
}

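/* Find the glink VMA of a PPC32 secure-PLT object: preferably from
 * the word following the PPCGOT address in .got, with the first word
 * of .plt as a fallback.  Returns 0 if neither can be read.  */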
static GElf_Addr
get_glink_vma(struct ltelf *lte, GElf_Addr ppcgot, Elf_Data *plt_data)
{
	Elf_Scn *ppcgot_sec = NULL;
	GElf_Shdr ppcgot_shdr;
	if (ppcgot != 0
	    && elf_get_section_covering(lte, ppcgot,
					&ppcgot_sec, &ppcgot_shdr) < 0)
		error(0, 0, "DT_PPC_GOT=%#"PRIx64", but no such section found",
		      ppcgot);

	if (ppcgot_sec != NULL) {
		Elf_Data *data = elf_loaddata(ppcgot_sec, &ppcgot_shdr);
		if (data == NULL || data->d_size < 8) {
			error(0, 0, "couldn't read GOT data");
		} else {
			/* Where PPCGOT begins in .got.  */
			size_t offset = ppcgot - ppcgot_shdr.sh_addr;
			assert(offset % 4 == 0);
			uint32_t glink_vma;
			if (elf_read_u32(data, offset + 4, &glink_vma) < 0) {
				error(0, 0, "couldn't read glink VMA address"
				      " at %zu@GOT", offset);
				return 0;
			}
			if (glink_vma != 0) {
				debug(1, "PPC GOT glink_vma address: %#" PRIx32,
				      glink_vma);
				return (GElf_Addr)glink_vma;
			}
		}
	}

	if (plt_data != NULL) {
		uint32_t glink_vma;
		if (elf_read_u32(plt_data, 0, &glink_vma) < 0) {
			error(0, 0, "couldn't read glink VMA address");
			return 0;
		}
		debug(1, ".plt glink_vma address: %#" PRIx32, glink_vma);
		return (GElf_Addr)glink_vma;
	}

	return 0;
}

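/* Look through SHT_DYNAMIC for the entry with the given TAG and store
 * its value in *VALUEP.  Returns 0 on success, -1 if the tag is not
 * present or the section can't be read.  */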
static int
load_dynamic_entry(struct ltelf *lte, int tag, GElf_Addr *valuep)
{
	Elf_Scn *scn;
	GElf_Shdr shdr;
	if (elf_get_section_type(lte, SHT_DYNAMIC, &scn, &shdr) < 0
	    || scn == NULL) {
	fail:
		error(0, 0, "Couldn't get SHT_DYNAMIC: %s",
		      elf_errmsg(-1));
		return -1;
	}

	Elf_Data *data = elf_loaddata(scn, &shdr);
	if (data == NULL)
		goto fail;

	size_t j;
	for (j = 0; j < shdr.sh_size / shdr.sh_entsize; ++j) {
		GElf_Dyn dyn;
		if (gelf_getdyn(data, j, &dyn) == NULL)
			goto fail;

		if (dyn.d_tag == tag) {
			*valuep = dyn.d_un.d_ptr;
			return 0;
		}
	}

	return -1;
}

static int
load_ppcgot(struct ltelf *lte, GElf_Addr *ppcgotp)
{
	return load_dynamic_entry(lte, DT_PPC_GOT, ppcgotp);
}

static int
load_ppc64_glink(struct ltelf *lte, GElf_Addr *glinkp)
{
	return load_dynamic_entry(lte, DT_PPC64_GLINK, glinkp);
}

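/* Return non-zero if DATA contains at least one non-zero byte.  Used
 * to decide whether a BSS .plt was already filled in by prelink.  */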
static int
nonzero_data(Elf_Data *data)
{
	/* We are not supposed to get here if there are no PLT data in
	 * the binary.  */
	assert(data != NULL);

	unsigned char *buf = data->d_buf;
	if (buf == NULL)
		return 0;

	size_t i;
	for (i = 0; i < data->d_size; ++i)
		if (buf[i] != 0)
			return 1;
	return 0;
}

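/* Per-library initialization: detect the PLT flavor, compute the stub
 * area VMA where applicable, and on PPC64 collect the .plt_call stub
 * symbols from the symbol table.  */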
int
arch_elf_init(struct ltelf *lte, struct library *lib)
{
	lte->arch.secure_plt = !(lte->plt_flags & SHF_EXECINSTR);

	/* For PPC32 BSS PLT, it is important whether the binary was
	 * prelinked.  If the .plt section has no data, or if it
	 * contains only zeroes, then this library is not prelinked,
	 * and we need to delay breakpoints.  */
	if (lte->ehdr.e_machine == EM_PPC && !lte->arch.secure_plt)
		lib->arch.bss_plt_prelinked = nonzero_data(lte->plt_data);
	else
		/* For cases where it's irrelevant, initialize the
		 * value to something conspicuous.  */
		lib->arch.bss_plt_prelinked = -1;

	if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) {
		GElf_Addr ppcgot;
		if (load_ppcgot(lte, &ppcgot) < 0) {
			error(0, 0, "couldn't find DT_PPC_GOT");
			return -1;
		}
		GElf_Addr glink_vma = get_glink_vma(lte, ppcgot, lte->plt_data);

		/* The stubs precede the glink code.  Each .rela.plt
		 * entry is 12 bytes (sizeof(Elf32_Rela)).  */
		assert(lte->relplt_size % 12 == 0);
		size_t count = lte->relplt_size / 12;
		lte->arch.plt_stub_vma = glink_vma
			- (GElf_Addr)count * PPC_PLT_STUB_SIZE;
		debug(1, "stub_vma is %#" PRIx64, lte->arch.plt_stub_vma);

	} else if (lte->ehdr.e_machine == EM_PPC64) {
		GElf_Addr glink_vma;
		if (load_ppc64_glink(lte, &glink_vma) < 0) {
			error(0, 0, "couldn't find DT_PPC64_GLINK");
			return -1;
		}

		/* The first glink stub starts at offset 32.  */
		lte->arch.plt_stub_vma = glink_vma + 32;
	}

	/* On PPC64, look for stub symbols in the symbol table.  These
	 * are called xxxxxxxx.plt_call.callee_name@version+addend.  */
	if (lte->ehdr.e_machine == EM_PPC64
	    && lte->symtab != NULL && lte->strtab != NULL) {

		/* N.B. We can't simply skip the symbols that we fail
		 * to read or malloc.  There may be more than one stub
		 * per symbol name, and if we failed in one but
		 * succeeded in another, the PLT enabling code would
		 * have no way to tell that something is missing.  We
		 * could work around that, of course, but it doesn't
		 * seem worth the trouble.  So if anything fails, we
		 * just pretend that we don't have stub symbols at
		 * all, as if the binary is stripped.  */

		size_t i;
		for (i = 0; i < lte->symtab_count; ++i) {
			GElf_Sym sym;
			if (gelf_getsym(lte->symtab, i, &sym) == NULL) {
				struct library_symbol *stub, *next;
			fail:
				for (stub = lte->arch.stubs; stub != NULL; ) {
					next = stub->next;
					library_symbol_destroy(stub);
					free(stub);
					stub = next;
				}
				lte->arch.stubs = NULL;
				break;
			}

			const char *name = lte->strtab + sym.st_name;

#define STUBN ".plt_call."
			if ((name = strstr(name, STUBN)) == NULL)
				continue;
			name += sizeof(STUBN) - 1;
#undef STUBN

			size_t len;
			const char *ver = strchr(name, '@');
			if (ver != NULL) {
				len = ver - name;

			} else {
				/* If there is a "+" at all, check that
				 * the symbol name ends in "+0".  */
				const char *add = strrchr(name, '+');
				if (add != NULL) {
					assert(strcmp(add, "+0") == 0);
					len = add - name;
				} else {
					len = strlen(name);
				}
			}

			char *sym_name = strndup(name, len);
			struct library_symbol *libsym = malloc(sizeof(*libsym));
			if (sym_name == NULL || libsym == NULL) {
			fail2:
				free(sym_name);
				free(libsym);
				goto fail;
			}

			/* XXX The double cast should be removed when
			 * target_address_t becomes integral type.  */
			target_address_t addr = (target_address_t)
				(uintptr_t)sym.st_value + lte->bias;
			if (library_symbol_init(libsym, addr, sym_name, 1,
						LS_TOPLT_EXEC) < 0)
				goto fail2;
			libsym->arch.type = PPC64PLT_STUB;
			libsym->next = lte->arch.stubs;
			lte->arch.stubs = libsym;
		}
	}

	return 0;
}

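/* Read the current value of the .plt slot at ADDR from the traced
 * process.  */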
static int
read_plt_slot_value(struct Process *proc, GElf_Addr addr, GElf_Addr *valp)
{
	/* On PPC32 we would need to do things differently, but
	 * PPC64/PPC32 is currently not supported anyway.  */
	assert(host_powerpc64());

	errno = 0;
	long l = ptrace(PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (l == -1 && errno != 0) {
		error(0, errno, "ptrace .plt slot value @%#" PRIx64, addr);
		return -1;
	}

	*valp = (GElf_Addr)l;
	return 0;
}

static int
unresolve_plt_slot(struct Process *proc, GElf_Addr addr, GElf_Addr value)
{
	/* We only modify plt_entry[0], which holds the resolved
	 * address of the routine.  We keep the TOC and environment
	 * pointers intact.  Hence the only adjustment that we need to
	 * make is to the IP.  */
	if (ptrace(PTRACE_POKETEXT, proc->pid, addr, value) < 0) {
		error(0, errno, "unresolve .plt slot");
		return -1;
	}
	return 0;
}

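/* Construct the symbol(s) that PLT breakpoints for A_NAME will be
 * attached to: on PPC64, either the chain of matching .plt_call stubs
 * gathered in arch_elf_init, or a synthetic symbol placed at the
 * computed PLT entry address.  PPC32 defers to the generic code.  */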
enum plt_status
arch_elf_add_plt_entry(struct Process *proc, struct ltelf *lte,
		       const char *a_name, GElf_Rela *rela, size_t ndx,
		       struct library_symbol **ret)
{
	if (lte->ehdr.e_machine == EM_PPC)
		return plt_default;

	/* PPC64.  If we have stubs, we return a chain of breakpoint
	 * sites, one for each stub that corresponds to this PLT
	 * entry.  */
	struct library_symbol *chain = NULL;
	struct library_symbol **symp;
	for (symp = &lte->arch.stubs; *symp != NULL; ) {
		struct library_symbol *sym = *symp;
		if (strcmp(sym->name, a_name) != 0) {
			symp = &(*symp)->next;
			continue;
		}

		/* Re-chain the symbol from stubs to CHAIN.  */
		*symp = sym->next;
		sym->next = chain;
		chain = sym;
	}

	if (chain != NULL) {
		*ret = chain;
		return plt_ok;
	}

	/* We don't have stub symbols.  Find the corresponding .plt
	 * slot, and check whether it contains the corresponding PLT
	 * address (or 0 if the dynamic linker hasn't run yet).
	 * N.B. we don't want to read this from the ELF file, but from
	 * the process image.  That makes a difference if we are
	 * attaching to a running process.  */

	GElf_Addr plt_entry_addr = arch_plt_sym_val(lte, ndx, rela);
	GElf_Addr plt_slot_addr = rela->r_offset;
	assert(plt_slot_addr >= lte->plt_addr
	       && plt_slot_addr < lte->plt_addr + lte->plt_size);

	GElf_Addr plt_slot_value;
	if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0)
		return plt_fail;

	char *name = strdup(a_name);
	struct library_symbol *libsym = malloc(sizeof(*libsym));
	if (name == NULL || libsym == NULL) {
		error(0, errno, "allocation for .plt slot");
	fail:
		free(name);
		free(libsym);
		return plt_fail;
	}

	/* XXX The double cast should be removed when
	 * target_address_t becomes integral type.  */
	if (library_symbol_init(libsym,
				(target_address_t)(uintptr_t)plt_entry_addr,
				name, 1, LS_TOPLT_EXEC) < 0)
		goto fail;
	libsym->arch.plt_slot_addr = plt_slot_addr;

	if (plt_slot_value == plt_entry_addr || plt_slot_value == 0) {
		libsym->arch.type = PPC64PLT_UNRESOLVED;
		libsym->arch.resolved_value = plt_entry_addr;

	} else {
		/* Unresolve the .plt slot.  If the binary was
		 * prelinked, this makes the code invalid, because in
		 * the case of a prelinked binary, the dynamic linker
		 * doesn't update .plt[0] and .plt[1] with addresses
		 * of the resolver.  But we don't care, we will never
		 * need to enter the resolver.  That just means that
		 * we have to un-un-resolve this back before we
		 * detach.  */

		if (unresolve_plt_slot(proc, plt_slot_addr, plt_entry_addr) < 0) {
			library_symbol_destroy(libsym);
			goto fail;
		}
		libsym->arch.type = PPC64PLT_RESOLVED;
		libsym->arch.resolved_value = plt_slot_value;
	}

	*ret = libsym;
	return plt_ok;
}

void
arch_elf_destroy(struct ltelf *lte)
{
	struct library_symbol *sym;
	for (sym = lte->arch.stubs; sym != NULL; ) {
		struct library_symbol *next = sym->next;
		library_symbol_destroy(sym);
		free(sym);
		sym = next;
	}
}

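/* on_hit callback for the breakpoint placed just past the resolver's
 * store to the .plt slot (see cb_keep_stepping_p).  Re-reads the
 * updated slot, turns the breakpoint back off and resets
 * PROC->arch.handler to signal that it ran.  */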
static void
dl_plt_update_bp_on_hit(struct breakpoint *bp, struct Process *proc)
{
	struct process_stopping_handler *self = proc->arch.handler;
	assert(self != NULL);

	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return;

	/* cb_on_all_stopped checks whether HANDLER is set to NULL as
	 * a way to verify that this was run.  It's an error if it
	 * wasn't.  */
	breakpoint_turn_off(bp, proc);
	proc->arch.handler = NULL;
}

static void
cb_on_all_stopped(struct process_stopping_handler *self)
{
	/* Put this in place for dl_plt_update_bp_on_hit to see.  */
	assert(self->task_enabling_breakpoint->arch.handler == NULL);
	self->task_enabling_breakpoint->arch.handler = self;

	linux_ptrace_disable_and_continue(self);
}

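/* Single-step predicate used while an unresolved PLT entry is being
 * resolved: keep stepping until the .plt slot changes, then unresolve
 * the slot again, mark the symbol resolved, and, if possible, plant
 * the dl_plt_update breakpoint after the resolver's store so that
 * future resolutions don't need single-stepping.  */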
static enum callback_status
cb_keep_stepping_p(struct process_stopping_handler *self)
{
	struct Process *proc = self->task_enabling_breakpoint;
	struct library_symbol *libsym = self->breakpoint_being_enabled->libsym;
	GElf_Addr value;
	if (read_plt_slot_value(proc, libsym->arch.plt_slot_addr, &value) < 0)
		return CBS_FAIL;

	/* In UNRESOLVED state, RESOLVED_VALUE in fact contains the
	 * PLT entry address.  */
	if (value == libsym->arch.resolved_value)
		return CBS_CONT;

	/* The .plt slot got resolved!  We can migrate the breakpoint
	 * to RESOLVED and stop single-stepping.  */
	if (unresolve_plt_slot(proc, libsym->arch.plt_slot_addr,
			       libsym->arch.resolved_value) < 0)
		return CBS_FAIL;

	/* Install a breakpoint at the address where the change takes
	 * place.  If we fail, then that just means that we'll have to
	 * single-step the next time around as well.  */
	struct Process *leader = proc->leader;
	if (leader == NULL || leader->arch.dl_plt_update_bp != NULL)
		goto resolve;

	/* We need to install it at the next instruction.  The IP
	 * points to a store instruction, so moving the breakpoint one
	 * instruction forward is safe.  */
	target_address_t addr = get_instruction_pointer(proc) + 4;
	leader->arch.dl_plt_update_bp = insert_breakpoint(proc, addr, NULL);

	if (leader->arch.dl_plt_update_bp != NULL) {
		static struct bp_callbacks dl_plt_update_cbs = {
			.on_hit = dl_plt_update_bp_on_hit,
		};
		leader->arch.dl_plt_update_bp->cbs = &dl_plt_update_cbs;

		/* Turn it off for now.  We will turn it on again when
		 * we hit the PLT entry that needs it.  */
		breakpoint_turn_off(leader->arch.dl_plt_update_bp, proc);
	}

resolve:
	libsym->arch.type = PPC64PLT_RESOLVED;
	libsym->arch.resolved_value = value;

	return CBS_STOP;
}

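/* on_continue callback for PPC64 PLT breakpoints.  An unresolved
 * entry is continued under a stopping handler (single-stepping, or
 * running to the dl_plt_update breakpoint if we have one) so that the
 * resolution can be observed; a resolved entry just has the IP
 * redirected to the already-known target.  */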
static void
ppc64_plt_bp_continue(struct breakpoint *bp, struct Process *proc)
{
	switch (bp->libsym->arch.type) {
		target_address_t rv;
		struct Process *leader;
		void (*on_all_stopped)(struct process_stopping_handler *);
		enum callback_status (*keep_stepping_p)
			(struct process_stopping_handler *);

	case PPC64PLT_UNRESOLVED:
		on_all_stopped = NULL;
		keep_stepping_p = NULL;
		leader = proc->leader;

		if (leader != NULL && leader->arch.dl_plt_update_bp != NULL
		    && breakpoint_turn_on(leader->arch.dl_plt_update_bp,
					  proc) >= 0)
			on_all_stopped = cb_on_all_stopped;
		else
			keep_stepping_p = cb_keep_stepping_p;

		if (process_install_stopping_handler
		    (proc, bp, on_all_stopped, keep_stepping_p, NULL) < 0) {
			perror("ppc64_plt_bp_continue: couldn't install"
			       " event handler");
			continue_after_breakpoint(proc, bp);
		}
		return;

	case PPC64PLT_RESOLVED:
		/* XXX The double cast should be removed when
		 * target_address_t becomes integral type.  */
		rv = (target_address_t)
			(uintptr_t)bp->libsym->arch.resolved_value;
		set_instruction_pointer(proc, rv);
		continue_process(proc->pid);
		return;

	case PPC_DEFAULT:
	case PPC64PLT_STUB:
		/* These should never get here.  */
		break;
	}

	assert(bp->libsym->arch.type != bp->libsym->arch.type);
	abort();
}

void
arch_library_init(struct library *lib)
{
}

void
arch_library_destroy(struct library *lib)
{
}

void
arch_library_clone(struct library *retp, struct library *lib)
{
}

int
arch_library_symbol_init(struct library_symbol *libsym)
{
	/* We set the type explicitly in the code above, where we have
	 * the necessary context.  This is for calls from ltrace-elf.c
	 * and such.  */
	libsym->arch.type = PPC_DEFAULT;
	return 0;
}

void
arch_library_symbol_destroy(struct library_symbol *libsym)
{
}

int
arch_library_symbol_clone(struct library_symbol *retp,
			  struct library_symbol *libsym)
{
	retp->arch = libsym->arch;
	return 0;
}

/* For some symbol types, we need to set up custom callbacks.  XXX we
 * don't need PROC here, we can store the data in BP if it is of
 * interest to us.  */
int
arch_breakpoint_init(struct Process *proc, struct breakpoint *bp)
{
	if (proc->e_machine == EM_PPC
	    || bp->libsym == NULL)
		return 0;

	/* Entry point breakpoints (LS_TOPLT_NONE) and stub PLT
	 * breakpoints need no special handling.  */
	if (bp->libsym->plt_type != LS_TOPLT_EXEC
	    || bp->libsym->arch.type == PPC64PLT_STUB)
		return 0;

	static struct bp_callbacks cbs = {
		.on_continue = ppc64_plt_bp_continue,
	};
	breakpoint_set_callbacks(bp, &cbs);
	return 0;
}

void
arch_breakpoint_destroy(struct breakpoint *bp)
{
}

int
arch_breakpoint_clone(struct breakpoint *retp, struct breakpoint *sbp)
{
	retp->arch = sbp->arch;
	return 0;
}

int
arch_process_init(struct Process *proc)
{
	proc->arch.dl_plt_update_bp = NULL;
	proc->arch.handler = NULL;
	return 0;
}

void
arch_process_destroy(struct Process *proc)
{
}

int
arch_process_clone(struct Process *retp, struct Process *proc)
{
	retp->arch = proc->arch;
	return 0;
}

int
arch_process_exec(struct Process *proc)
{
	return arch_process_init(proc);
}