trace.c revision cbe29c6c0ad01839a81272c4715ea73d17e89611
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "ptrace.h"
#include <asm/unistd.h>
#include <assert.h>

#include "common.h"

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_OLDSETOPTIONS    21
#define PTRACE_SETOPTIONS       0x4200
#define PTRACE_GETEVENTMSG      0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD   0x00000001
#define PTRACE_O_TRACEFORK      0x00000002
#define PTRACE_O_TRACEVFORK     0x00000004
#define PTRACE_O_TRACECLONE     0x00000008
#define PTRACE_O_TRACEEXEC      0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT      0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK       1
#define PTRACE_EVENT_VFORK      2
#define PTRACE_EVENT_CLONE      3
#define PTRACE_EVENT_EXEC       4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT       6

#endif /* PTRACE_EVENT_FORK */

#ifdef ARCH_HAVE_UMOVELONG
extern int arch_umovelong (Process *, void *, long *, arg_type_info *);
int
umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
	return arch_umovelong (proc, addr, result, info);
}
#else
/* Read a single long from the process's memory address 'addr'.  */
int
umovelong (Process *proc, void *addr, long *result, arg_type_info *info) {
	long pointed_to;

	errno = 0;
	pointed_to = ptrace (PTRACE_PEEKTEXT, proc->pid, addr, 0);
	if (pointed_to == -1 && errno)
		return -errno;

	*result = pointed_to;
	if (info) {
		switch (info->type) {
		case ARGTYPE_INT:
			*result &= 0x00000000ffffffffUL;
		default:
			break;
		}
	}
	return 0;
}
#endif

void
trace_me(void) {
	debug(DEBUG_PROCESS, "trace_me: pid=%d", getpid());
	if (ptrace(PTRACE_TRACEME, 0, 1, 0) < 0) {
		perror("PTRACE_TRACEME");
		exit(1);
	}
}
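/* A minimal sketch (not part of ltrace) of how trace_me() is meant to
 * be used from the tracee side: the child calls PTRACE_TRACEME and
 * execs, and the kernel stops it with SIGTRAP before the new program
 * runs, giving the tracer a chance to set things up.  ltrace's actual
 * startup differs; this only illustrates the protocol.  */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/ptrace.h>

int
example_spawn_traced(char *argv[]) {
	pid_t pid = fork();
	if (pid < 0)
		return -1;
	if (pid == 0) {			/* child: become a tracee */
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		execvp(argv[0], argv);	/* stops with SIGTRAP on success */
		_exit(127);
	}
	int status;			/* parent: wait for the exec stop */
	if (waitpid(pid, &status, 0) == pid && WIFSTOPPED(status))
		printf("child %d stopped, ready to trace\n", pid);
	return pid;
}
#endif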
int
trace_pid(pid_t pid) {
	debug(DEBUG_PROCESS, "trace_pid: pid=%d", pid);
	if (ptrace(PTRACE_ATTACH, pid, 1, 0) < 0) {
		return -1;
	}

	/* man ptrace: PTRACE_ATTACH attaches to the process specified
	   in pid.  The child is sent a SIGSTOP, but will not
	   necessarily have stopped by the completion of this call;
	   use wait() to wait for the child to stop.  */
	if (waitpid (pid, NULL, __WALL) != pid) {
		perror ("trace_pid: waitpid");
		return -1;
	}

	return 0;
}

void
trace_set_options(Process *proc, pid_t pid) {
	if (proc->tracesysgood & 0x80)
		return;

	debug(DEBUG_PROCESS, "trace_set_options: pid=%d", pid);

	long options = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK |
		PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE |
		PTRACE_O_TRACEEXEC;
	if (ptrace(PTRACE_SETOPTIONS, pid, 0, options) < 0 &&
	    ptrace(PTRACE_OLDSETOPTIONS, pid, 0, options) < 0) {
		perror("PTRACE_SETOPTIONS");
		return;
	}
	proc->tracesysgood |= 0x80;
}

void
untrace_pid(pid_t pid) {
	debug(DEBUG_PROCESS, "untrace_pid: pid=%d", pid);
	ptrace(PTRACE_DETACH, pid, 1, 0);
}

void
continue_after_signal(pid_t pid, int signum) {
	debug(DEBUG_PROCESS, "continue_after_signal: pid=%d, signum=%d",
	      pid, signum);
	ptrace(PTRACE_SYSCALL, pid, 0, signum);
}

static enum ecb_status
event_for_pid(Event * event, void * data)
{
	if (event->proc != NULL && event->proc->pid == (pid_t)(uintptr_t)data)
		return ecb_yield;
	return ecb_cont;
}

static int
have_events_for(pid_t pid)
{
	return each_qd_event(event_for_pid, (void *)(uintptr_t)pid) != NULL;
}

void
continue_process(pid_t pid)
{
	debug(DEBUG_PROCESS, "continue_process: pid=%d", pid);

	/* Only really continue the process if there are no events in
	   the queue for this process.  Otherwise just wait for the
	   other events to arrive.  */
	if (!have_events_for(pid))
		/* We always trace syscalls to control fork(),
		 * clone(), execve()...  */
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
	else
		debug(DEBUG_PROCESS,
		      "putting off the continue, events in queue.");
}
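/* A sketch (assuming a plain wait loop rather than ltrace's event
 * machinery) of what PTRACE_O_TRACESYSGOOD buys us: with the option
 * set, syscall stops report SIGTRAP|0x80 in the wait status, so they
 * can be told apart from genuine SIGTRAPs.  This is also why
 * trace_set_options() ORs 0x80 into proc->tracesysgood.  */
#if 0
#include <signal.h>
#include <sys/wait.h>
#include <sys/ptrace.h>

static void
example_wait_loop(pid_t pid) {
	int status;
	while (waitpid(pid, &status, __WALL) == pid && WIFSTOPPED(status)) {
		if (WSTOPSIG(status) == (SIGTRAP | 0x80)) {
			/* syscall entry or exit */
		} else if (WSTOPSIG(status) == SIGTRAP) {
			/* breakpoint, singlestep, or exec */
		} else {
			/* signal-delivery stop */
		}
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
	}
}
#endif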
/**
 * This is used for bookkeeping related to PIDs that the event
 * handlers work with.
 */
struct pid_task {
	pid_t pid;	/* This may be 0 for tasks that exited
			 * mid-handling.  */
	int sigstopped : 1;
	int got_event : 1;
	int delivered : 1;
	int vforked : 1;
} * pids;

struct pid_set {
	struct pid_task * tasks;
	size_t count;
	size_t alloc;
};

/**
 * Breakpoint re-enablement.  When we hit a breakpoint, we must
 * disable it, single-step, and re-enable it.  That single-step can be
 * done only by one task in a task group, while others are stopped,
 * otherwise the processes would race for who sees the breakpoint
 * disabled and who doesn't.  The following is to keep track of it
 * all.
 */
struct process_stopping_handler
{
	Event_Handler super;

	/* The task that is doing the re-enablement.  */
	Process * task_enabling_breakpoint;

	/* The breakpoint being re-enabled.  */
	Breakpoint * breakpoint_being_enabled;

	enum {
		/* We are waiting for everyone to land in t/T.  */
		psh_stopping = 0,

		/* We are doing the PTRACE_SINGLESTEP.  */
		psh_singlestep,

		/* We are waiting for all the SIGSTOPs to arrive so
		 * that we can sink them.  */
		psh_sinking,

		/* This is for tracking the ugly workaround.  */
		psh_ugly_workaround,
	} state;

	int exiting;

	struct pid_set pids;
};

static struct pid_task *
get_task_info(struct pid_set * pids, pid_t pid)
{
	assert(pid != 0);
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid == pid)
			return &pids->tasks[i];

	return NULL;
}

static struct pid_task *
add_task_info(struct pid_set * pids, pid_t pid)
{
	if (pids->count == pids->alloc) {
		size_t ns = (2 * pids->alloc) ?: 4;
		struct pid_task * n = realloc(pids->tasks,
					      sizeof(*pids->tasks) * ns);
		if (n == NULL)
			return NULL;
		pids->tasks = n;
		pids->alloc = ns;
	}
	struct pid_task * task_info = &pids->tasks[pids->count++];
	memset(task_info, 0, sizeof(*task_info));
	task_info->pid = pid;
	return task_info;
}

static enum pcb_status
task_stopped(Process * task, void * data)
{
	enum process_status st = process_status(task->pid);
	if (data != NULL)
		*(enum process_status *)data = st;

	/* If the task is already stopped, don't worry about it.
	 * Likewise if it managed to become a zombie or terminate in
	 * the meantime.  This can happen when the whole thread group
	 * is terminating.  */
	switch (st) {
	case ps_invalid:
	case ps_tracing_stop:
	case ps_zombie:
		return pcb_cont;
	default:
		return pcb_stop;
	}
}

/* Task is blocked if it's stopped, or if it's a vfork parent.  */
static enum pcb_status
task_blocked(Process * task, void * data)
{
	struct pid_set * pids = data;
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info != NULL
	    && task_info->vforked)
		return pcb_cont;

	return task_stopped(task, NULL);
}

static Event * process_vfork_on_event(Event_Handler * super, Event * event);

static enum pcb_status
task_vforked(Process * task, void * data)
{
	if (task->event_handler != NULL
	    && task->event_handler->on_event == &process_vfork_on_event)
		return pcb_stop;
	return pcb_cont;
}

static int
is_vfork_parent(Process * task)
{
	return each_task(task->leader, &task_vforked, NULL) != NULL;
}
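/* Note: add_task_info() above relies on the GNU "a ?: b" extension,
 * which is shorthand for "a ? a : b" with "a" evaluated once.  The
 * growth step is therefore equivalent to the portable spelling below.  */
#if 0
size_t ns = pids->alloc ? 2 * pids->alloc : 4;	/* == (2 * pids->alloc) ?: 4 */
#endif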
static enum pcb_status
send_sigstop(Process * task, void * data)
{
	Process * leader = task->leader;
	struct pid_set * pids = data;

	/* Look for a pre-existing task record, or add a new one.  */
	struct pid_task * task_info = get_task_info(pids, task->pid);
	if (task_info == NULL)
		task_info = add_task_info(pids, task->pid);
	if (task_info == NULL) {
		perror("send_sigstop: add_task_info");
		destroy_event_handler(leader);
		/* Signal failure upwards.  */
		return pcb_stop;
	}

	/* This task still has not been attached to.  It should be
	   stopped by the kernel.  */
	if (task->state == STATE_BEING_CREATED)
		return pcb_cont;

	/* Don't bother sending SIGSTOP if we are already stopped, or
	 * if we sent the SIGSTOP already, which happens when we are
	 * handling "onexit" and inherited the handler from breakpoint
	 * re-enablement.  */
	enum process_status st;
	if (task_stopped(task, &st) == pcb_cont)
		return pcb_cont;
	if (task_info->sigstopped) {
		if (!task_info->delivered)
			return pcb_cont;
		task_info->delivered = 0;
	}

	/* Also don't attempt to stop the process if it's the parent
	 * of a vforked process.  We set up an event handler specially
	 * to hint at this.  In that case the parent is in the D
	 * (uninterruptible sleep) state, which we use to weed out
	 * unnecessary looping.  */
	if (st == ps_sleeping
	    && is_vfork_parent (task)) {
		task_info->vforked = 1;
		return pcb_cont;
	}

	if (task_kill(task->pid, SIGSTOP) >= 0) {
		debug(DEBUG_PROCESS, "send SIGSTOP to %d", task->pid);
		task_info->sigstopped = 1;
	} else
		fprintf(stderr,
			"Warning: couldn't send SIGSTOP to %d\n", task->pid);

	return pcb_cont;
}

/* On certain kernels, detaching right after a singlestep causes the
   tracee to be killed with a SIGTRAP, even though the singlestep was
   properly caught by waitpid.  The ugly workaround is to put a
   breakpoint where IP points and let the process continue.  After
   this the breakpoint can be retracted and the process detached.  */
static void
ugly_workaround(Process * proc)
{
	void * ip = get_instruction_pointer(proc);
	Breakpoint * sbp = dict_find_entry(proc->leader->breakpoints, ip);
	if (sbp != NULL)
		enable_breakpoint(proc, sbp);
	else
		insert_breakpoint(proc, ip, NULL, 1);
	ptrace(PTRACE_CONT, proc->pid, 0, 0);
}

static void
process_stopping_done(struct process_stopping_handler * self, Process * leader)
{
	debug(DEBUG_PROCESS, "process stopping done %d",
	      self->task_enabling_breakpoint->pid);
	size_t i;
	if (!self->exiting) {
		for (i = 0; i < self->pids.count; ++i)
			if (self->pids.tasks[i].pid != 0
			    && self->pids.tasks[i].delivered)
				continue_process(self->pids.tasks[i].pid);
		continue_process(self->task_enabling_breakpoint->pid);
		destroy_event_handler(leader);
	} else {
		self->state = psh_ugly_workaround;
		ugly_workaround(self->task_enabling_breakpoint);
	}
}

/* Before we detach, we need to make sure that the task's IP is on the
 * edge of an instruction.  So for tasks that have a breakpoint event
 * in the queue, we adjust the instruction pointer, just like
 * continue_after_breakpoint does.  */
static enum ecb_status
undo_breakpoint(Event * event, void * data)
{
	if (event != NULL
	    && event->proc->leader == data
	    && event->type == EVENT_BREAKPOINT)
		set_instruction_pointer(event->proc, event->e_un.brk_addr);
	return ecb_cont;
}

static enum pcb_status
untrace_task(Process * task, void * data)
{
	if (task != data)
		untrace_pid(task->pid);
	return pcb_cont;
}

static enum pcb_status
remove_task(Process * task, void * data)
{
	/* Don't remove the leader just yet.  */
	if (task != data)
		remove_process(task);
	return pcb_cont;
}

static void
detach_process(Process * leader)
{
	each_qd_event(&undo_breakpoint, leader);
	disable_all_breakpoints(leader);

	/* Now untrace the process, if it was attached to by -p.  */
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL)
			continue;
		if (proc->leader == leader) {
			each_task(leader, &untrace_task, NULL);
			break;
		}
	}
	each_task(leader, &remove_task, leader);
	destroy_event_handler(leader);
	remove_task(leader, NULL);
}
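/* Why undo_breakpoint() above rewinds the IP, illustrated for x86 (an
 * assumption; other architectures differ): a software breakpoint is
 * the one-byte trap instruction 0xCC poked over the original code, so
 * after the trap fires the instruction pointer already points one
 * byte past the breakpoint address, and must be moved back before the
 * original instruction can be re-executed.  */
#if 0
/* conceptually, on x86: */
void * ip = get_instruction_pointer(proc);	/* == brk_addr + 1 after the trap */
set_instruction_pointer(proc, event->e_un.brk_addr);
#endif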
static void
handle_stopping_event(struct pid_task * task_info, Event ** eventp)
{
	/* Mark all events, so that we know whom to SIGCONT later.  */
	if (task_info != NULL)
		task_info->got_event = 1;

	Event * event = *eventp;

	/* In every state, sink SIGSTOP events for tasks that we sent
	 * it to.  */
	if (task_info != NULL
	    && event->type == EVENT_SIGNAL
	    && event->e_un.signum == SIGSTOP) {
		debug(DEBUG_PROCESS, "SIGSTOP delivered to %d", task_info->pid);
		if (task_info->sigstopped
		    && !task_info->delivered) {
			task_info->delivered = 1;
			*eventp = NULL; // sink the event
		} else
			fprintf(stderr, "suspicious: %d got SIGSTOP, but "
				"sigstopped=%d and delivered=%d\n",
				task_info->pid, task_info->sigstopped,
				task_info->delivered);
	}
}

/* Some SIGSTOPs may not have been delivered to their respective tasks
 * yet.  They are still in the queue.  If we have seen an event for
 * that process, continue it, so that the SIGSTOP can be delivered and
 * caught by ltrace.  */
static void
continue_for_sigstop_delivery(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i) {
		if (pids->tasks[i].pid != 0
		    && pids->tasks[i].sigstopped
		    && !pids->tasks[i].delivered
		    && pids->tasks[i].got_event) {
			debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
			      pids->tasks[i].pid);
			ptrace(PTRACE_SYSCALL, pids->tasks[i].pid, 0, 0);
		}
	}
}

static int
event_exit_p(Event * event)
{
	return event != NULL && (event->type == EVENT_EXIT
				 || event->type == EVENT_EXIT_SIGNAL);
}

static int
event_exit_or_none_p(Event * event)
{
	return event == NULL || event_exit_p(event)
		|| event->type == EVENT_NONE;
}

static int
await_sigstop_delivery(struct pid_set * pids, struct pid_task * task_info,
		       Event * event)
{
	/* If we still didn't get our SIGSTOP, continue the process
	 * and carry on.  */
	if (event != NULL && !event_exit_or_none_p(event)
	    && task_info != NULL && task_info->sigstopped) {
		debug(DEBUG_PROCESS, "continue %d for SIGSTOP delivery",
		      task_info->pid);
		/* We should get the signal the first thing
		 * after this, so it should be OK to continue
		 * even if we are over a breakpoint.  */
		ptrace(PTRACE_SYSCALL, task_info->pid, 0, 0);

	} else {
		/* If all SIGSTOPs were delivered, uninstall the
		 * handler and continue everyone.  */
		/* XXX I suspect that we should check tasks that are
		 * still around.  As things are now, there is a race
		 * between waiting for everyone to stop and one of
		 * the tasks exiting.  */
		int all_clear = 1;
		size_t i;
		for (i = 0; i < pids->count; ++i)
			if (pids->tasks[i].pid != 0
			    && pids->tasks[i].sigstopped
			    && !pids->tasks[i].delivered) {
				all_clear = 0;
				break;
			}
		return all_clear;
	}

	return 0;
}

static int
all_stops_accountable(struct pid_set * pids)
{
	size_t i;
	for (i = 0; i < pids->count; ++i)
		if (pids->tasks[i].pid != 0
		    && !pids->tasks[i].got_event
		    && !have_events_for(pids->tasks[i].pid))
			return 0;
	return 1;
}

/* This event handler is installed when we are in the process of
 * stopping the whole thread group to do the breakpoint re-enablement
 * for one of the threads.  We pump all events to the queue for later
 * processing while we wait for all the threads to stop.  When this
 * happens, we let the re-enablement thread do its PTRACE_SINGLESTEP,
 * re-enable, and continue everyone.  */
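/* The Event_Handler pattern used throughout this file, in miniature (a
 * sketch; everything beyond on_event/destroy and install_event_handler
 * is assumed for illustration): a handler is installed on the
 * thread-group leader, each incoming event is first offered to
 * on_event, and whatever it returns, possibly NULL meaning "sunk",
 * continues down the normal processing path.  */
#if 0
struct my_handler {
	Event_Handler super;		/* must be first, so the casts work */
	int private_state;
};

static Event *
my_on_event(Event_Handler * super, Event * event) {
	struct my_handler * self = (void *)super;
	self->private_state++;
	return event;			/* or NULL to sink the event */
}

/* struct my_handler * h = calloc(sizeof(*h), 1);
   h->super.on_event = my_on_event;
   install_event_handler(leader, &h->super);  */
#endif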
static Event *
process_stopping_on_event(Event_Handler * super, Event * event)
{
	struct process_stopping_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;
	Breakpoint * sbp = self->breakpoint_being_enabled;
	Process * teb = self->task_enabling_breakpoint;

	debug(DEBUG_PROCESS,
	      "pid %d; event type %d; state %d",
	      task->pid, event->type, self->state);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	if (task_info == NULL)
		fprintf(stderr, "new task??? %d\n", task->pid);
	handle_stopping_event(task_info, &event);

	int state = self->state;
	int event_to_queue = !event_exit_or_none_p(event);

	/* Deactivate the entry if the task exits.  */
	if (event_exit_p(event) && task_info != NULL)
		task_info->pid = 0;

	switch (state) {
	case psh_stopping:
		/* If everyone is stopped, singlestep.  */
		if (each_task(leader, &task_blocked, &self->pids) == NULL) {
			debug(DEBUG_PROCESS, "all stopped, now SINGLESTEP %d",
			      teb->pid);
			if (sbp->enabled)
				disable_breakpoint(teb, sbp);
			if (ptrace(PTRACE_SINGLESTEP, teb->pid, 0, 0))
				perror("PTRACE_SINGLESTEP");
			self->state = state = psh_singlestep;
		}
		break;

	case psh_singlestep: {
		/* In singlestep state, breakpoint signifies that we
		 * have now stepped, and can re-enable the breakpoint.  */
		if (event != NULL && task == teb) {
			/* Essentially we don't care what event caused
			 * the thread to stop.  We can do the
			 * re-enablement now.  */
			if (sbp->enabled)
				enable_breakpoint(teb, sbp);

			continue_for_sigstop_delivery(&self->pids);

			self->breakpoint_being_enabled = NULL;
			self->state = state = psh_sinking;

			if (event->type == EVENT_BREAKPOINT)
				event = NULL; // handled
		} else
			break;
	}

		/* fall-through */

	case psh_sinking:
		if (await_sigstop_delivery(&self->pids, task_info, event))
			process_stopping_done(self, leader);
		break;

	case psh_ugly_workaround:
		if (event == NULL)
			break;
		if (event->type == EVENT_BREAKPOINT) {
			undo_breakpoint(event, leader);
			if (task == teb)
				self->task_enabling_breakpoint = NULL;
		}
		if (self->task_enabling_breakpoint == NULL
		    && all_stops_accountable(&self->pids)) {
			undo_breakpoint(event, leader);
			detach_process(leader);
			event = NULL; // handled
		}
	}

	if (event != NULL && event_to_queue) {
		enque_event(event);
		event = NULL; // sink the event
	}

	return event;
}

static void
process_stopping_destroy(Event_Handler * super)
{
	struct process_stopping_handler * self = (void *)super;
	free(self->pids.tasks);
}
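/* What the handler above orchestrates for a whole thread group, shown
 * for the trivial single-task case (a sketch assuming x86 and a
 * one-byte 0xCC breakpoint; ltrace's enable/disable_breakpoint hide
 * these details): restore the original word, step one instruction,
 * then put the trap byte back.  */
#if 0
#include <sys/wait.h>
#include <sys/ptrace.h>

static void
example_step_over(pid_t pid, void *addr, long orig_word, long trap_word) {
	int status;
	ptrace(PTRACE_POKETEXT, pid, addr, orig_word);	/* disable */
	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
	waitpid(pid, &status, __WALL);			/* SIGTRAP after the step */
	ptrace(PTRACE_POKETEXT, pid, addr, trap_word);	/* re-enable */
}
#endif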
void
continue_after_breakpoint(Process *proc, Breakpoint *sbp)
{
	set_instruction_pointer(proc, sbp->addr);
	if (sbp->enabled == 0) {
		continue_process(proc->pid);
	} else {
		debug(DEBUG_PROCESS,
		      "continue_after_breakpoint: pid=%d, addr=%p",
		      proc->pid, sbp->addr);
#if defined __sparc__ || defined __ia64__ || defined __mips__
		/* we don't want to singlestep here */
		continue_process(proc->pid);
#else
		struct process_stopping_handler * handler
			= calloc(sizeof(*handler), 1);
		if (handler == NULL) {
			perror("malloc breakpoint disable handler");
		fatal:
			/* Carry on not bothering to re-enable.  */
			continue_process(proc->pid);
			return;
		}

		handler->super.on_event = process_stopping_on_event;
		handler->super.destroy = process_stopping_destroy;
		handler->task_enabling_breakpoint = proc;
		handler->breakpoint_being_enabled = sbp;
		install_event_handler(proc->leader, &handler->super);

		if (each_task(proc->leader, &send_sigstop,
			      &handler->pids) != NULL)
			goto fatal;

		/* And deliver the first fake event, in case all the
		 * conditions are already fulfilled.  */
		Event ev;
		ev.type = EVENT_NONE;
		ev.proc = proc;
		process_stopping_on_event(&handler->super, &ev);
#endif
	}
}

/**
 * Ltrace exit.  When we are about to exit, we have to go through all
 * the processes, stop them all, remove all the breakpoints, and then
 * detach the processes that we attached to using -p.  If we left the
 * other tasks running, they might hit stray return breakpoints and
 * produce artifacts, so we better stop everyone, even if it's a bit
 * of extra work.
 */
struct ltrace_exiting_handler
{
	Event_Handler super;
	struct pid_set pids;
};

static Event *
ltrace_exiting_on_event(Event_Handler * super, Event * event)
{
	struct ltrace_exiting_handler * self = (void *)super;
	Process * task = event->proc;
	Process * leader = task->leader;

	debug(DEBUG_PROCESS, "pid %d; event type %d", task->pid, event->type);

	struct pid_task * task_info = get_task_info(&self->pids, task->pid);
	handle_stopping_event(task_info, &event);

	if (event != NULL && event->type == EVENT_BREAKPOINT)
		undo_breakpoint(event, leader);

	if (await_sigstop_delivery(&self->pids, task_info, event)
	    && all_stops_accountable(&self->pids))
		detach_process(leader);

	/* Sink all non-exit events.  We are about to exit, so we
	 * don't bother with queuing them.  */
	if (event_exit_or_none_p(event))
		return event;

	return NULL;
}

static void
ltrace_exiting_destroy(Event_Handler * super)
{
	struct ltrace_exiting_handler * self = (void *)super;
	free(self->pids.tasks);
}

static int
ltrace_exiting_install_handler(Process * proc)
{
	/* Only install to the leader.  */
	if (proc->leader != proc)
		return 0;

	/* Perhaps we are already installed, if the user passed
	 * several -p options that are tasks of one process.  */
	if (proc->event_handler != NULL
	    && proc->event_handler->on_event == &ltrace_exiting_on_event)
		return 0;

	/* If the stopping handler is already present, let it do the
	 * work.  */
	if (proc->event_handler != NULL) {
		assert(proc->event_handler->on_event
		       == &process_stopping_on_event);
		struct process_stopping_handler * other
			= (void *)proc->event_handler;
		other->exiting = 1;
		return 0;
	}

	struct ltrace_exiting_handler * handler
		= calloc(sizeof(*handler), 1);
	if (handler == NULL) {
		perror("malloc exiting handler");
	fatal:
		/* XXXXXXXXXXXXXXXXXXX fixme */
		return -1;
	}

	handler->super.on_event = ltrace_exiting_on_event;
	handler->super.destroy = ltrace_exiting_destroy;
	install_event_handler(proc->leader, &handler->super);

	if (each_task(proc->leader, &send_sigstop,
		      &handler->pids) != NULL)
		goto fatal;

	return 0;
}
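/* The exit path above, reduced to a single task (a sketch; ltrace has
 * to do this for every task and sink the SIGSTOPs through its event
 * queue, which is what the pid_set bookkeeping is for): stop the
 * tracee, wait for the stop to be reported, then detach without
 * forwarding the signal.  */
#if 0
#include <signal.h>
#include <sys/wait.h>
#include <sys/ptrace.h>

static void
example_stop_and_detach(pid_t pid) {
	int status;
	kill(pid, SIGSTOP);
	waitpid(pid, &status, __WALL);		/* wait for the stop */
	ptrace(PTRACE_DETACH, pid, 0, 0);	/* sig 0: don't deliver SIGSTOP */
}
#endif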
/*
 * When the traced process vforks, it's suspended until the child
 * process calls _exit or exec*.  In the meantime, the two share the
 * address space.
 *
 * The child process should only ever call _exit or exec*, but we
 * can't count on that (it's not the role of ltrace to police, but to
 * observe).  In any case, we will _at least_ have to deal with
 * removal of the vfork return breakpoint (which we have to smuggle
 * back in, so that the parent can see it, too), and introduction of
 * the exec* return breakpoint.  Since we already have both breakpoint
 * actions to deal with, we might as well support it all.
 *
 * The gist is that we pretend that the child is in a thread group
 * with its parent, and handle it as a multi-threaded case, with the
 * exception that we know that the parent is blocked, and don't
 * attempt to stop it.  When the child execs, we undo the setup.
 *
 * XXX The parent process could be un-suspended before ltrace gets
 * the child's exec/exit event.  Make sure this is taken care of.
 */

static Event *
process_vfork_on_event(Event_Handler * super, Event * event)
{
	struct process_vfork_handler * self = (void *)super;
	assert(self != NULL);

	switch (event->type) {
	case EVENT_EXIT:
	case EVENT_EXIT_SIGNAL:
	case EVENT_EXEC:
		/* Now is the time to remove the leader that we
		 * artificially set up earlier.  XXX and do all the
		 * other fun stuff.  */
		change_process_leader(event->proc, event->proc);
		destroy_event_handler(event->proc);

		/* XXXXX this could happen in the middle of handling a
		 * multi-threaded breakpoint.  We must be careful to
		 * undo the effects that we introduced above (vforked
		 * = 1 et al.).  */

	default:
		;
	}

	return event;
}

void
continue_after_vfork(Process * proc)
{
	debug(DEBUG_PROCESS, "continue_after_vfork: pid=%d", proc->pid);
	Event_Handler * handler = calloc(sizeof(*handler), 1);
	if (handler == NULL) {
		perror("malloc vfork handler");
		/* Carry on without the special vfork handling.  */
		continue_process(proc->parent->pid);
		return;
	}

	/* We must set up a custom event handler, so that we see
	 * exec/exit events for the task itself.  */
	handler->on_event = process_vfork_on_event;
	install_event_handler(proc, handler);

	/* Make sure that the child is the sole thread.  */
	assert(proc->leader == proc);
	assert(proc->next == NULL || proc->next->leader != proc);

	/* Make sure that the child's parent is properly set up.  */
	assert(proc->parent != NULL);
	assert(proc->parent->leader != NULL);

	change_process_leader(proc, proc->parent->leader);
	continue_process(proc->parent->pid);
}

/* If ltrace gets SIGINT, the processes directly or indirectly run by
 * ltrace get it too.  We just have to wait long enough for the signal
 * to be delivered and the process terminated, which we notice and
 * exit ltrace, too.  So there's not much we need to do there.  We
 * want to keep tracing those processes as usual, in case they just
 * SIG_IGN the SIGINT to do their shutdown etc.
 *
 * For processes run in the background, we want to install an exit
 * handler that stops all the threads, removes all breakpoints, and
 * detaches.  */
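/* The vfork behavior that the comment above relies on, demonstrated
 * standalone (a sketch of Linux behavior, not ltrace code): the
 * parent's vfork() does not return until the child has called _exit()
 * or exec*, so the parent is guaranteed to observe the child's write
 * to the shared address space.  */
#if 0
#include <assert.h>
#include <unistd.h>

int
example_vfork_order(void) {
	volatile int flag = 0;
	pid_t pid = vfork();
	if (pid == 0) {			/* child: shares the parent's memory */
		flag = 1;
		_exit(0);
	}
	assert(flag == 1);		/* parent resumed only after _exit */
	return pid;
}
#endif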
void
ltrace_exiting(void)
{
	struct opt_p_t * it;
	for (it = opt_p; it != NULL; it = it->next) {
		Process * proc = pid2proc(it->pid);
		if (proc == NULL || proc->leader == NULL)
			continue;
		if (ltrace_exiting_install_handler(proc->leader) < 0)
			fprintf(stderr,
				"Couldn't install exiting handler for %d.\n",
				proc->pid);
	}
}

size_t
umovebytes(Process *proc, void *addr, void *laddr, size_t len) {

	union {
		long a;
		char c[sizeof(long)];
	} a;
	int started = 0;
	size_t offset = 0, bytes_read = 0;

	while (offset < len) {
		errno = 0;
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		if (a.a == -1 && errno) {
			if (started && errno == EIO)
				return bytes_read;
			else
				return -1;
		}
		started = 1;

		if (len - offset >= sizeof(long)) {
			memcpy(laddr + offset, &a.c[0], sizeof(long));
			bytes_read += sizeof(long);
		}
		else {
			memcpy(laddr + offset, &a.c[0], len - offset);
			bytes_read += (len - offset);
		}
		offset += sizeof(long);
	}

	return bytes_read;
}

/* Read a series of bytes starting at the process's memory address
   'addr' and continuing until a NUL ('\0') is seen or 'len' bytes
   have been read.  */
int
umovestr(Process *proc, void *addr, int len, void *laddr) {
	union {
		long a;
		char c[sizeof(long)];
	} a;
	unsigned i;
	int offset = 0;

	while (offset < len) {
		a.a = ptrace(PTRACE_PEEKTEXT, proc->pid, addr + offset, 0);
		for (i = 0; i < sizeof(long); i++) {
			if (a.c[i] && offset + (signed)i < len) {
				*(char *)(laddr + offset + i) = a.c[i];
			} else {
				*(char *)(laddr + offset + i) = '\0';
				return 0;
			}
		}
		offset += sizeof(long);
	}
	*(char *)(laddr + offset) = '\0';
	return 0;
}
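/* A usage sketch for the accessors above (assuming a Process * for an
 * attached, stopped tracee, and <limits.h> for PATH_MAX): this is how
 * a NUL-terminated string argument, e.g. the path passed to open(2),
 * would be fetched out of the tracee.  Note that umovestr() always
 * NUL-terminates and may store up to len + 1 bytes, so the local
 * buffer needs one byte of slack.  */
#if 0
char buf[PATH_MAX + 1];
if (umovestr(proc, remote_addr, PATH_MAX, buf) == 0)
	printf("tracee string: \"%s\"\n", buf);

long word;
if (umovelong(proc, remote_addr, &word, NULL) == 0)
	printf("tracee word: %#lx\n", word);
#endif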