/* Low level interface to valgrind, for the remote server for GDB integrated
   in valgrind.
   Copyright (C) 2011
   Free Software Foundation, Inc.

   This file is part of VALGRIND.
   It has been inspired from a file from gdbserver in gdb 6.6.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "server.h"
#include "target.h"
#include "regdef.h"
#include "regcache.h"
#include "valgrind_low.h"
#include "gdb/signals.h"
#include "pub_core_aspacemgr.h"
#include "pub_tool_machine.h"
#include "pub_core_threadstate.h"
#include "pub_core_transtab.h"
#include "pub_core_gdbserver.h"
#include "pub_tool_debuginfo.h"

/* the_low_target defines the architecture specific aspects, depending
   on the cpu. */
static struct valgrind_target_ops the_low_target;

/* Builds a hex image of bin according to the byte order of the
   architecture.  Useful for register and int images. */
char* heximage (char *buf, char *bin, int count)
{
#if defined(VGA_x86) || defined(VGA_amd64)
   char rev[count];
   /* note: no need for a trailing \0, the length is known from count */
   int i;
   for (i = 0; i < count; i++)
      rev[i] = bin[count - i - 1];
   hexify (buf, rev, count);
#else
   hexify (buf, bin, count);
#endif
   return buf;
}
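
/* Illustrative example (not part of the original code): on a little endian
   host such as x86 or amd64, a 32 bit register holding 0x12345678 is stored
   as the bytes {0x78, 0x56, 0x34, 0x12}; heximage reverses them first, so
   the resulting hex string reads "12345678", i.e. most significant byte
   first.  In this file, heximage is only used to make register values
   readable in the dlog traces below. */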

void* C2v(CORE_ADDR addr)
{
   return (void*) addr;
}

static
char *image_ptid(unsigned long ptid)
{
  static char result[100];
  VG_(sprintf) (result, "id %lu", ptid);
  return result;
}
#define get_thread(inf) ((struct thread_info *)(inf))
static
void remove_thread_if_not_in_vg_threads (struct inferior_list_entry *inf)
{
  struct thread_info *thread = get_thread (inf);
  if (!VG_(lwpid_to_vgtid)(thread_to_gdb_id(thread))) {
     dlog(1, "removing gdb ptid %s\n",
          image_ptid(thread_to_gdb_id(thread)));
     remove_thread (thread);
  }
}

/* synchronize threads known by valgrind and threads known by gdbserver */
static
void valgrind_update_threads (int pid)
{
  ThreadId tid;
  ThreadState *ts;
  unsigned long ptid;
  struct thread_info *ti;

  /* call remove_thread for all gdb threads not in valgrind threads */
  for_each_inferior (&all_threads, remove_thread_if_not_in_vg_threads);

  /* call add_thread for all valgrind threads not known in gdb all_threads */
  for (tid = 1; tid < VG_N_THREADS; tid++) {

#define LOCAL_THREAD_TRACE " ti* %p vgtid %d status %s as gdb ptid %s lwpid %d\n", \
        ti, tid, VG_(name_of_ThreadStatus) (ts->status), \
        image_ptid (ptid), ts->os_state.lwpid

     if (VG_(is_valid_tid) (tid)) {
        ts = VG_(get_ThreadState) (tid);
        ptid = ts->os_state.lwpid;
        ti = gdb_id_to_thread (ptid);
        if (!ti) {
           /* We do not report the threads which are not yet fully
              initialized, otherwise this creates duplicate threads
              in gdb: once with pid xxx lwpid 0, then after that
              with pid xxx lwpid yyy. */
           if (ts->status != VgTs_Init) {
              dlog(1, "adding_thread" LOCAL_THREAD_TRACE);
              add_thread (ptid, ts, ptid);
           }
        } else {
           dlog(2, "(known thread)" LOCAL_THREAD_TRACE);
        }
     }
#undef LOCAL_THREAD_TRACE
  }
}

/* Return nonzero if the given thread is still alive.  */
static
int valgrind_thread_alive (unsigned long tid)
{
  struct thread_info *ti = gdb_id_to_thread(tid);
  ThreadState *tst;

  if (ti != NULL) {
     tst = (ThreadState *) inferior_target_data (ti);
     return tst->status != VgTs_Zombie;
  }
  else {
    return 0;
  }
}

/* Allocates and builds a register structure containing the shadow registers.
   reg_defs describes the normal registers, n is their number. */
static
struct reg* build_shadow_arch (struct reg *reg_defs, int n) {
   int i, r;
   static char *postfix[3] = { "", "s1", "s2" };
   struct reg *new_regs = malloc(3 * n * sizeof(reg_defs[0]));
   int reg_set_len = reg_defs[n-1].offset + reg_defs[n-1].size;

   for (i = 0; i < 3; i++) {
      for (r = 0; r < n; r++) {
         new_regs[i*n + r].name = malloc(strlen(reg_defs[r].name)
                                         + strlen (postfix[i]) + 1);
         strcpy (new_regs[i*n + r].name, reg_defs[r].name);
         strcat (new_regs[i*n + r].name, postfix[i]);
         new_regs[i*n + r].offset = i*reg_set_len + reg_defs[r].offset;
         new_regs[i*n + r].size = reg_defs[r].size;
         dlog(1,
              "%10s Nr %d offset(bit) %d offset(byte) %d  size(bit) %d\n",
              new_regs[i*n + r].name, i*n + r, new_regs[i*n + r].offset,
              (new_regs[i*n + r].offset) / 8, new_regs[i*n + r].size);
      }
   }

   return new_regs;
}
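
/* Sketch of the layout produced by build_shadow_arch: for n normal
   registers the returned array has 3*n entries.  Entry i*n + r describes
   register r of set i (0 = guest, 1 = shadow1, 2 = shadow2); its name is
   the normal name with postfix "", "s1" or "s2" (e.g. on amd64 this would
   give "rax", "raxs1", "raxs2" -- the exact names come from the
   architecture specific reg_defs), and its bit offset is the normal offset
   shifted up by i * reg_set_len. */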

/* Fetch one register from the valgrind VEX guest state.  */
static
void fetch_register (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= the_low_target.num_regs) {
      dlog(0, "error fetch_register regno %d max %d\n",
           regno, the_low_target.num_regs);
      return;
   }
   size = register_size (regno);
   if (size > 0) {
      Bool mod;
      char buf [size];
      VG_(memset) (buf, 0, size); // registers not fetched will be seen as 0.
      (*the_low_target.transfer_register) (tid, regno, buf,
                                           valgrind_to_gdbserver, size, &mod);
      // Note: the mod set by transfer_register is not what matters here;
      // we want to know whether the data in the register cache was
      // modified, which is what supply_register reports via mod.
      supply_register (regno, buf, &mod);
      if (mod && VG_(debugLog_getLevel)() > 1) {
         char bufimage [2*size + 1];
         heximage (bufimage, buf, size);
         dlog(2,
              "fetched register %d size %d name %s value %s tid %d status %s\n",
              regno, size, the_low_target.reg_defs[regno].name, bufimage,
              tid, VG_(name_of_ThreadStatus) (tst->status));
      }
   }
}

/* Fetch all registers, or just one, from the child process.
   regno -1 (or 0) means all registers.  */
static
void usr_fetch_inferior_registers (int regno)
{
   if (regno == -1 || regno == 0)
      for (regno = 0; regno < the_low_target.num_regs; regno++)
         fetch_register (regno);
   else
      fetch_register (regno);
}
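
/* Note (an assumption about the callers, which live in the generic
   gdbserver code): all registers are typically requested at once
   (regno -1), for instance when answering a 'g' packet, while a single
   register is requested with regno >= 0; both cases end up here. */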

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).  */
static
void usr_store_inferior_registers (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= 0) {

      if (regno >= the_low_target.num_regs) {
         dlog(0, "error store_register regno %d max %d\n",
              regno, the_low_target.num_regs);
         return;
      }

      size = register_size (regno);
      if (size > 0) {
         Bool mod;
         Addr old_SP, new_SP;
         char buf[size];

         if (regno == the_low_target.stack_pointer_regno) {
            /* When the stack pointer register is changed such that the
               stack is extended, we had better inform the tool of the
               stack increase.  This is needed in particular to avoid
               spurious Memcheck errors during inferior calls.  So, we
               save in old_SP the SP before the change.  A change of
               stack pointer is also assumed to have initialised this
               new stack space.  For the typical example of an inferior
               call, gdb writes arguments on the stack, and then changes
               the stack pointer.  As the tool's stack increase function
               might mark the new area as undefined, we have to call it
               at the right moment. */
            VG_(memset) ((void *) &old_SP, 0, size);
            (*the_low_target.transfer_register) (tid, regno, (void *) &old_SP,
                                                 valgrind_to_gdbserver, size,
                                                 &mod);
         }

         VG_(memset) (buf, 0, size);
         collect_register (regno, buf);
         (*the_low_target.transfer_register) (tid, regno, buf,
                                              gdbserver_to_valgrind, size,
                                              &mod);
         if (mod && VG_(debugLog_getLevel)() > 1) {
            char bufimage [2*size + 1];
            heximage (bufimage, buf, size);
            dlog(2,
                 "stored register %d size %d name %s value %s "
                 "tid %d status %s\n",
                 regno, size, the_low_target.reg_defs[regno].name, bufimage,
                 tid, VG_(name_of_ThreadStatus) (tst->status));
         }
         if (regno == the_low_target.stack_pointer_regno) {
            VG_(memcpy) (&new_SP, buf, size);
            if (old_SP > new_SP) {
               Word delta = (Word)new_SP - (Word)old_SP;
               dlog(1,
                    "   stack increase by stack pointer changed from %p to %p "
                    "delta %ld\n",
                    (void*) old_SP, (void *) new_SP,
                    delta);
               VG_TRACK( new_mem_stack_w_ECU, new_SP, -delta, 0 );
               VG_TRACK( new_mem_stack,       new_SP, -delta );
               if (VG_(tdict).track_post_mem_write) {
                  VG_(tdict).track_post_mem_write( Vg_CoreClientReq, tid,
                                                   new_SP, -delta);
               }
            }
         }
      }
   }
   else {
      for (regno = 0; regno < the_low_target.num_regs; regno++)
         usr_store_inferior_registers (regno);
   }
}
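
/* Illustration of the stack pointer special case above (derived from the
   comments in the code; the gdb command is only a hypothetical example):
   for an inferior call such as
      (gdb) call some_function(1, 2)
   gdb first writes the arguments below the current stack pointer via
   valgrind_write_memory, then stores the new, lower SP.  The SP store is
   caught above, and the newly exposed range [new_SP, old_SP) of -delta
   bytes is reported to the tool as new stack, which is what avoids the
   spurious Memcheck errors mentioned in the comment. */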

static
void valgrind_fetch_registers (int regno)
{
   usr_fetch_inferior_registers (regno);
}

static
void valgrind_store_registers (int regno)
{
   usr_store_inferior_registers (regno);
}

/* Copy LEN bytes from the inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  */

static
int valgrind_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
   const void *sourceaddr = C2v (memaddr);
   dlog(2, "reading memory %p size %d\n", sourceaddr, len);
   if (!VG_(am_is_valid_for_client_or_free_or_resvn) ((Addr) sourceaddr,
                                                      len, VKI_PROT_READ)) {
      dlog(1, "error reading memory %p size %d\n", sourceaddr, len);
      return -1;
   }
   VG_(memcpy) (myaddr, sourceaddr, len);
   return 0;
}

/* Copy LEN bytes of data from debugger memory at MYADDR
   to the inferior's memory at MEMADDR.
   Returns 0 on success, -1 on failure (cannot write the inferior).  */

static
int valgrind_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
                           int len)
{
   void *targetaddr = C2v (memaddr);
   dlog(2, "writing memory %p size %d\n", targetaddr, len);
   if (!VG_(am_is_valid_for_client_or_free_or_resvn) ((Addr)targetaddr,
                                                      len, VKI_PROT_WRITE)) {
      dlog(1, "error writing memory %p size %d\n", targetaddr, len);
      return -1;
   }
   if (len > 0) {
      VG_(memcpy) (targetaddr, myaddr, len);
      if (VG_(tdict).track_post_mem_write) {
         /* Inform the tool of the post mem write.  Note that we do the
            minimum necessary to avoid complaints from e.g. memcheck.
            The idea is that the debugger should be as unintrusive as
            possible.  So, we do not inform of the pre mem write (and in
            any case, this would cause problems with memcheck, which does
            not like our CorePart in pre_mem_write). */
         ThreadState *tst =
            (ThreadState *) inferior_target_data (current_inferior);
         ThreadId tid = tst->tid;
         VG_(tdict).track_post_mem_write( Vg_CoreClientReq, tid,
                                          (Addr) targetaddr, len );
      }
   }
   return 0;
}

/* Insert or remove a breakpoint or watchpoint. */
static
int valgrind_point (Bool insert, char type, CORE_ADDR addr, int len)
{
   PointKind kind;
   switch (type) {
   case '0': /* implemented by inserting checks at each instruction in sb */
      kind = software_breakpoint;
      break;
   case '1': /* hw breakpoint, same implementation as sw breakpoint */
      kind = hardware_breakpoint;
      break;
   case '2':
      kind = write_watchpoint;
      break;
   case '3':
      kind = read_watchpoint;
      break;
   case '4':
      kind = access_watchpoint;
      break;
   default:
      vg_assert (0);
   }

   /* Attention: the gdbserver convention is the opposite of
      VG_(gdbserver_point): 0 means ok; 1 means not ok. */
   if (VG_(gdbserver_point) (kind, insert, addr, len))
      return 0;
   else
      return 1; /* error or unsupported */
}
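
/* Note: the type characters handled by valgrind_point come from the GDB
   remote protocol Z/z packets: Z0/z0 software breakpoint, Z1/z1 hardware
   breakpoint, Z2/z2 write watchpoint, Z3/z3 read watchpoint, Z4/z4 access
   watchpoint; insert tells whether this is the Z (insert) or z (remove)
   form. */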

static
void valgrind_send_signal (int sig)
{
   dlog(1, "valgrind_send_signal %d called ????\n", sig);
}

static
char* valgrind_target_xml (void)
{
   return (char *) the_low_target.target_xml;
}

static
char* valgrind_shadow_target_xml (void)
{
   return (char *) the_low_target.shadow_target_xml;
}

static
int valgrind_insert_point (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ True, type, addr, len);
}

static
int valgrind_remove_point (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ False, type, addr, len);
}

static CORE_ADDR stopped_data_address = 0;
void VG_(set_watchpoint_stop_address) (Addr addr)
{
   stopped_data_address = addr;
}

static
int valgrind_stopped_by_watchpoint (void)
{
   return stopped_data_address != 0;
}

static
CORE_ADDR valgrind_stopped_data_address (void)
{
   return stopped_data_address;
}
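
/* How the three functions above fit together (partly an assumption about
   callers outside this file): when a watchpoint fires, the watchpoint
   handling code calls VG_(set_watchpoint_stop_address) with the data
   address; gdbserver then uses valgrind_stopped_by_watchpoint and
   valgrind_stopped_data_address to tell GDB that the stop was caused by a
   watchpoint and at which address.  The address is cleared again in
   valgrind_resume below. */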

/* pc at which we last stopped */
static CORE_ADDR stop_pc;

/* pc at which we resume.
   If stop_pc != resume_pc, it means
      gdb/gdbserver has changed the pc so as to have either
      a    "continue by jumping at that address"
      or a "continue at that address to call some code from gdb".
*/
static CORE_ADDR resume_pc;

static int signal_to_report;

void gdbserver_signal_encountered (Int sigNo)
{
   signal_to_report = sigNo;
}

static int signal_to_deliver;
Bool gdbserver_deliver_signal (Int sigNo)
{
   return sigNo == signal_to_deliver;
}
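
/* Relationship between the two functions above, as used in this file:
   gdbserver_signal_encountered records the signal that valgrind_wait
   will report to GDB instead of the default SIGTRAP, while
   signal_to_deliver is set from resume_info->sig in valgrind_resume, so
   gdbserver_deliver_signal (whose caller is outside this file) returns
   True only for the signal GDB asked to pass on in the last resume
   request. */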

static
char* sym (Addr addr)
{
   static char buf[200];
   VG_(describe_IP) (addr, buf, 200);
   return buf;
}
ThreadId vgdb_interrupted_tid = 0;
/* called to wait for the process to stop */
static
unsigned char valgrind_wait (char *ourstatus)
{
   int pid;
   unsigned long wptid;
   ThreadState *tst;
   enum target_signal sig;

   pid = VG_(getpid) ();
   dlog(1, "enter valgrind_wait pid %d\n", pid);

   regcache_invalidate();
   valgrind_update_threads(pid);

   /* In valgrind, we consider that a wait always succeeds with STOPPED 'T'
      and with a signal TRAP (i.e. a breakpoint), unless there is
      a signal to report. */
   *ourstatus = 'T';
   if (signal_to_report == 0)
      sig = TARGET_SIGNAL_TRAP;
   else
      sig = target_signal_from_host(signal_to_report);

   if (vgdb_interrupted_tid != 0)
      tst = VG_(get_ThreadState) (vgdb_interrupted_tid);
   else
      tst = VG_(get_ThreadState) (VG_(running_tid));
   wptid = tst->os_state.lwpid;
   /* We can only change the current_inferior when the wptid references
      an existing thread.  Otherwise, we are still in the init phase.
      (hack similar to main thread hack in valgrind_update_threads) */
   if (tst->os_state.lwpid)
      current_inferior = gdb_id_to_thread (wptid);
   stop_pc = (*the_low_target.get_pc) ();

   dlog(1,
        "exit valgrind_wait returns ptid %s stop_pc %s signal %d\n",
        image_ptid (wptid), sym (stop_pc), sig);
   return sig;
}
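
/* Note (an assumption about the generic gdbserver code): the 'T' status
   and the signal returned here end up in the stop reply packet sent to
   GDB, e.g. "T05" when reporting the default SIGTRAP. */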

/* 0 => not single stepping.
   1 => single stepping asked by gdb
   2 => single stepping asked by valgrind (watchpoint) */
static int stepping = 0;

/* called when the process is to be resumed */
static
void valgrind_resume (struct thread_resume *resume_info)
{
   dlog(1,
        "resume_info thread %ld leave_stopped %d step %d sig %d stepping %d\n",
        resume_info->thread,
        resume_info->leave_stopped,
        resume_info->step,
        resume_info->sig,
        stepping);
   if (valgrind_stopped_by_watchpoint()) {
      dlog(1, "clearing watchpoint stopped_data_address %p\n",
           C2v(stopped_data_address));
      VG_(set_watchpoint_stop_address) ((Addr) 0);
   }
   signal_to_deliver = resume_info->sig;

   stepping = resume_info->step;
   resume_pc = (*the_low_target.get_pc) ();
   if (resume_pc != stop_pc) {
      dlog(1,
           "stop_pc %p changed to be resume_pc %s\n",
           C2v(stop_pc), sym(resume_pc));
   }
   regcache_invalidate();
}

Addr valgrind_get_ignore_break_once(void)
{
   if (valgrind_single_stepping())
      return resume_pc;
   else
      return 0;
}

void valgrind_set_single_stepping(Bool set)
{
   if (set)
      stepping = 2;
   else
      stepping = 0;
}

Bool valgrind_single_stepping(void)
{
   if (stepping)
      return True;
   else
      return False;
}

static struct target_ops valgrind_target_ops = {
   valgrind_thread_alive,
   valgrind_resume,
   valgrind_wait,
   valgrind_fetch_registers,
   valgrind_store_registers,
   valgrind_read_memory,
   valgrind_write_memory,
   valgrind_send_signal,
   valgrind_target_xml,
   valgrind_shadow_target_xml,
   valgrind_insert_point,
   valgrind_remove_point,
   valgrind_stopped_by_watchpoint,
   valgrind_stopped_data_address,
};

/* Returns a pointer to the architecture state corresponding to
   the provided register set: 0 => normal guest registers,
                              1 => shadow1
                              2 => shadow2
*/
VexGuestArchState* get_arch (int set, ThreadState* tst)
{
  switch (set) {
  case 0: return &tst->arch.vex;
  case 1: return &tst->arch.vex_shadow1;
  case 2: return &tst->arch.vex_shadow2;
  default: vg_assert(0);
  }
}

static int non_shadow_num_regs = 0;
static struct reg *non_shadow_reg_defs = NULL;
void initialize_shadow_low(Bool shadow_mode)
{
  if (non_shadow_reg_defs == NULL) {
    non_shadow_reg_defs = the_low_target.reg_defs;
    non_shadow_num_regs = the_low_target.num_regs;
  }

  regcache_invalidate();
  if (the_low_target.reg_defs != non_shadow_reg_defs) {
     free (the_low_target.reg_defs);
  }
  if (shadow_mode) {
    the_low_target.num_regs = 3 * non_shadow_num_regs;
    the_low_target.reg_defs = build_shadow_arch (non_shadow_reg_defs,
                                                 non_shadow_num_regs);
  } else {
    the_low_target.num_regs = non_shadow_num_regs;
    the_low_target.reg_defs = non_shadow_reg_defs;
  }
  set_register_cache (the_low_target.reg_defs, the_low_target.num_regs);
}
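
/* Example of the effect (register names are taken from the architecture
   specific files, so treat them as illustrative): without shadow_mode an
   amd64 guest exposes rax, rbx, ... to GDB; with shadow_mode the register
   cache is rebuilt via build_shadow_arch and additionally exposes
   raxs1, ... and raxs2, ... for the two shadow register sets, tripling
   num_regs. */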

void initialize_low(void)
{
   set_target_ops (&valgrind_target_ops);

#if defined(VGA_x86)
   x86_init_architecture(&the_low_target);
#elif defined(VGA_amd64)
   amd64_init_architecture(&the_low_target);
#elif defined(VGA_arm)
   arm_init_architecture(&the_low_target);
#elif defined(VGA_ppc32)
   ppc32_init_architecture(&the_low_target);
#elif defined(VGA_ppc64)
   ppc64_init_architecture(&the_low_target);
#elif defined(VGA_s390x)
   s390x_init_architecture(&the_low_target);
#else
   architecture missing in valgrind-low.c
#endif
}