/* Target operations for the remote server for GDB.
   Copyright (C) 2002, 2004, 2005, 2011
   Free Software Foundation, Inc.

   Contributed by MontaVista Software.

   This file is part of GDB.
   It has been modified to integrate it into Valgrind.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "server.h"
#include "target.h"
#include "regdef.h"
#include "regcache.h"
#include "valgrind_low.h"
#include "gdb/signals.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_machine.h"
#include "pub_core_threadstate.h"
#include "pub_core_transtab.h"
#include "pub_core_gdbserver.h"
#include "pub_core_debuginfo.h"


/* the_low_target defines the architecture-specific aspects, depending
   on the CPU. */
static struct valgrind_target_ops the_low_target;

static
char *image_ptid(unsigned long ptid)
{
  static char result[50];    // large enough
  VG_(sprintf) (result, "id %lu", ptid);
  return result;
}
#define get_thread(inf) ((struct thread_info *)(inf))
static
void remove_thread_if_not_in_vg_threads (struct inferior_list_entry *inf)
{
  struct thread_info *thread = get_thread (inf);
  if (!VG_(lwpid_to_vgtid)(thread_to_gdb_id(thread))) {
     dlog(1, "removing gdb ptid %s\n",
          image_ptid(thread_to_gdb_id(thread)));
     remove_thread (thread);
  }
}

/* Synchronize the threads known by gdbserver with the threads known by valgrind. */
static
void valgrind_update_threads (int pid)
{
  ThreadId tid;
  ThreadState *ts;
  unsigned long ptid;
  struct thread_info *ti;

  /* call remove_thread for all gdb threads not in valgrind threads */
  for_each_inferior (&all_threads, remove_thread_if_not_in_vg_threads);

  /* call add_thread for all valgrind threads not known in gdb all_threads */
  for (tid = 1; tid < VG_N_THREADS; tid++) {

#define LOCAL_THREAD_TRACE " ti* %p vgtid %u status %s as gdb ptid %s lwpid %d\n", \
        ti, tid, VG_(name_of_ThreadStatus) (ts->status), \
        image_ptid (ptid), ts->os_state.lwpid

     if (VG_(is_valid_tid) (tid)) {
        ts = VG_(get_ThreadState) (tid);
        ptid = ts->os_state.lwpid;
        ti = gdb_id_to_thread (ptid);
        if (!ti) {
           /* We do not report threads that are not yet fully
              initialized, as this would otherwise create duplicate
              threads in gdb: once with pid xxx lwpid 0, and then
              again with pid xxx lwpid yyy. */
           if (ts->status != VgTs_Init) {
              dlog(1, "adding_thread" LOCAL_THREAD_TRACE);
              add_thread (ptid, ts, ptid);
           }
        } else {
           dlog(2, "(known thread)" LOCAL_THREAD_TRACE);
        }
     }
#undef LOCAL_THREAD_TRACE
  }
}

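/* Build the register description for the shadow architecture, i.e. the
   normal guest registers followed by the valgrind shadow1 and shadow2
   register sets.  As an illustrative sketch (register names and sizes
   below are hypothetical, not taken from a real architecture): for a
   2-register set { r0 (offset 0, size 32), r1 (offset 32, size 32) },
   reg_set_len is 64 bits and the result describes 6 registers:
      r0   offset 0     r1   offset 32
      r0s1 offset 64    r1s1 offset 96
      r0s2 offset 128   r1s2 offset 160
   i.e. each shadow set is a full copy of the layout, shifted by
   reg_set_len bits. */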
static
struct reg* build_shadow_arch (struct reg *reg_defs, int n) {
   int i, r;
   static const char *postfix[3] = { "", "s1", "s2" };
   struct reg *new_regs = malloc(3 * n * sizeof(reg_defs[0]));
   int reg_set_len = reg_defs[n-1].offset + reg_defs[n-1].size;

   for (i = 0; i < 3; i++) {
      for (r = 0; r < n; r++) {
         char *regname = malloc(strlen(reg_defs[r].name)
                                + strlen (postfix[i]) + 1);
         strcpy (regname, reg_defs[r].name);
         strcat (regname, postfix[i]);
         new_regs[i*n + r].name = regname;
         new_regs[i*n + r].offset = i*reg_set_len + reg_defs[r].offset;
         new_regs[i*n + r].size = reg_defs[r].size;
         dlog(1,
              "%-10s Nr %d offset(bit) %d offset(byte) %d  size(bit) %d\n",
              new_regs[i*n + r].name, i*n + r, new_regs[i*n + r].offset,
              (new_regs[i*n + r].offset) / 8, new_regs[i*n + r].size);
      }
   }

   return new_regs;
}


static CORE_ADDR stopped_data_address = 0;
void VG_(set_watchpoint_stop_address) (Addr addr)
{
   stopped_data_address = addr;
}

int valgrind_stopped_by_watchpoint (void)
{
   return stopped_data_address != 0;
}

CORE_ADDR valgrind_stopped_data_address (void)
{
   return stopped_data_address;
}

/* pc at which we last stopped */
static CORE_ADDR stop_pc;

/* pc at which we resume.
   If stop_pc != resume_pc, it means
      gdb/gdbserver has changed the pc so as to have either
      a    "continue by jumping at that address"
      or a "continue at that address to call some code from gdb".
*/
static CORE_ADDR resume_pc;

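/* The signal that caused the stop and must be reported to GDB
   (vki_signal_to_report), and the signal that will actually be given
   back to the guest when execution resumes (vki_signal_to_deliver).
   They start out identical, but GDB may ask for a different signal to
   be delivered, for instance with the GDB commands (sketch only, not
   verified here):
      signal 0         -- resume without delivering the signal
      signal SIGUSR1   -- resume delivering SIGUSR1 instead
   valgrind_resume below then overwrites si_signo of the signal to
   deliver with whatever GDB requested. */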
static vki_siginfo_t vki_signal_to_report;
static vki_siginfo_t vki_signal_to_deliver;

void gdbserver_signal_encountered (const vki_siginfo_t *info)
{
   vki_signal_to_report = *info;
   vki_signal_to_deliver = *info;
}

void gdbserver_pending_signal_to_report (vki_siginfo_t *info)
{
   *info = vki_signal_to_report;
}

Bool gdbserver_deliver_signal (vki_siginfo_t *info)
{
   if (info->si_signo != vki_signal_to_deliver.si_signo)
173      dlog(1, "GDB changed signal  info %d to_report %d to_deliver %d\n",
           info->si_signo, vki_signal_to_report.si_signo,
           vki_signal_to_deliver.si_signo);
   *info = vki_signal_to_deliver;
   return vki_signal_to_deliver.si_signo != 0;
}

static Bool before_syscall;
static Int sysno_to_report = -1;
void gdbserver_syscall_encountered (Bool before, Int sysno)
{
   before_syscall = before;
   sysno_to_report = sysno;
}

Int valgrind_stopped_by_syscall (void)
{
   return sysno_to_report;
}

Bool valgrind_stopped_before_syscall()
{
   vg_assert (sysno_to_report >= 0);
   return before_syscall;
}


static unsigned char exit_status_to_report;
static int exit_code_to_report;
void gdbserver_process_exit_encountered (unsigned char status, Int code)
{
   vg_assert (status == 'W' || status == 'X');
   exit_status_to_report = status;
   exit_code_to_report = code;
}

static
const HChar* sym (Addr addr)
{
   return VG_(describe_IP) (addr, NULL);
}

ThreadId vgdb_interrupted_tid = 0;

/* 0 => not single stepping.
   1 => single stepping asked by gdb
   2 => single stepping asked by valgrind (watchpoint) */
static int stepping = 0;

Addr valgrind_get_ignore_break_once(void)
{
   if (valgrind_single_stepping())
      return resume_pc;
   else
      return 0;
}

void valgrind_set_single_stepping(Bool set)
{
   if (set)
      stepping = 2;
   else
      stepping = 0;
}

Bool valgrind_single_stepping(void)
{
   if (stepping)
      return True;
   else
      return False;
}

int valgrind_thread_alive (unsigned long tid)
{
  struct thread_info *ti = gdb_id_to_thread(tid);
  ThreadState *tst;

  if (ti != NULL) {
     tst = (ThreadState *) inferior_target_data (ti);
     return tst->status != VgTs_Zombie;
  }
  else {
    return 0;
  }
}

void valgrind_resume (struct thread_resume *resume_info)
{
   dlog(1,
        "resume_info step %d sig %d stepping %d\n",
        resume_info->step,
        resume_info->sig,
        stepping);
   if (valgrind_stopped_by_watchpoint()) {
      dlog(1, "clearing watchpoint stopped_data_address %p\n",
           C2v(stopped_data_address));
      VG_(set_watchpoint_stop_address) ((Addr) 0);
   }
   if (valgrind_stopped_by_syscall () >= 0) {
      dlog(1, "clearing stopped by syscall %d\n",
           valgrind_stopped_by_syscall ());
      gdbserver_syscall_encountered (False, -1);
   }

   vki_signal_to_deliver.si_signo = resume_info->sig;
   /* The signal was reported to GDB and GDB told us to resume execution,
      so reset the signal to report to 0. */
   VG_(memset) (&vki_signal_to_report, 0, sizeof(vki_signal_to_report));

   stepping = resume_info->step;
   resume_pc = (*the_low_target.get_pc) ();
   if (resume_pc != stop_pc) {
      dlog(1,
           "stop_pc %p changed to be resume_pc %s\n",
           C2v(stop_pc), sym(resume_pc));
   }
   regcache_invalidate();
}

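/* Wait for the inferior and report how it stopped.
   *ourstatus is set to one of the GDB remote protocol stop kinds and
   the return value qualifies it (a sketch of the conventions as used
   by this server, not a full remote protocol description):
      'W'  process exited normally;   return value = exit code
      'X'  process killed by signal;  return value = signal number
      'T'  process stopped;           return value = signal number
   In valgrind almost every wait reports 'T' with SIGTRAP, unless a
   real signal or a process exit is pending. */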
unsigned char valgrind_wait (char *ourstatus)
{
   int pid;
   unsigned long wptid;
   ThreadState *tst;
   enum target_signal sig;
   int code;

   pid = VG_(getpid) ();
   dlog(1, "enter valgrind_wait pid %d\n", pid);

   regcache_invalidate();
   valgrind_update_threads(pid);

   /* First see if we are done with this process. */
   if (exit_status_to_report != 0) {
      *ourstatus = exit_status_to_report;
      exit_status_to_report = 0;

      if (*ourstatus == 'W') {
         code = exit_code_to_report;
         exit_code_to_report = 0;
         dlog(1, "exit valgrind_wait status W exit code %d\n", code);
         return code;
      }

      if (*ourstatus == 'X') {
         sig = target_signal_from_host(exit_code_to_report);
         exit_code_to_report = 0;
         dlog(1, "exit valgrind_wait status X signal %u\n", sig);
         return sig;
      }
   }

   /* In valgrind, we consider that a wait always succeeds with status
      STOPPED 'T' and signal TRAP (i.e. a breakpoint), unless there is
      a signal to report. */
   *ourstatus = 'T';
   if (vki_signal_to_report.si_signo == 0)
      sig = TARGET_SIGNAL_TRAP;
   else
      sig = target_signal_from_host(vki_signal_to_report.si_signo);

   if (vgdb_interrupted_tid != 0)
      tst = VG_(get_ThreadState) (vgdb_interrupted_tid);
   else
      tst = VG_(get_ThreadState) (VG_(running_tid));
   wptid = tst->os_state.lwpid;
   /* we can only change the current_inferior when the wptid references
      an existing thread. Otherwise, we are still in the init phase.
      (hack similar to main thread hack in valgrind_update_threads) */
   if (tst->os_state.lwpid)
      current_inferior = gdb_id_to_thread (wptid);
   stop_pc = (*the_low_target.get_pc) ();

   dlog(1,
        "exit valgrind_wait status T ptid %s stop_pc %s signal %u\n",
        image_ptid (wptid), sym (stop_pc), sig);
   return sig;
}

/* Fetch one register from the valgrind VEX guest state.  */
static
void fetch_register (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= the_low_target.num_regs) {
      dlog(0, "error fetch_register regno %d max %d\n",
           regno, the_low_target.num_regs);
      return;
   }
   size = register_size (regno);
   if (size > 0) {
      Bool mod;
      char buf [size];
      VG_(memset) (buf, 0, size); // registers not fetched will be seen as 0.
      (*the_low_target.transfer_register) (tid, regno, buf,
                                           valgrind_to_gdbserver, size, &mod);
      // Note: the *mod received from transfer_register is not interesting here.
      // What matters is whether the register data in the register cache
      // was modified.
      supply_register (regno, buf, &mod);
      if (mod && VG_(debugLog_getLevel)() > 1) {
         char bufimage [2*size + 1];
         heximage (bufimage, buf, size);
         dlog(3, "fetched register %d size %d name %s value %s tid %u status %s\n",
              regno, size, the_low_target.reg_defs[regno].name, bufimage,
              tid, VG_(name_of_ThreadStatus) (tst->status));
      }
   }
}

/* Fetch all registers, or just one, from the child process.  */
static
void usr_fetch_inferior_registers (int regno)
{
   if (regno == -1 || regno == 0)
      for (regno = 0; regno < the_low_target.num_regs; regno++)
         fetch_register (regno);
   else
      fetch_register (regno);
}

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).  */
static
void usr_store_inferior_registers (int regno)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno >= 0) {

      if (regno >= the_low_target.num_regs) {
         dlog(0, "error store_register regno %d max %d\n",
              regno, the_low_target.num_regs);
         return;
      }

      size = register_size (regno);
      if (size > 0) {
         Bool mod;
         Addr old_SP, new_SP;
         char buf[size];

         if (regno == the_low_target.stack_pointer_regno) {
            /* When the stack pointer register is changed such that
               the stack is extended, we had better inform the tool of
               the stack increase.  This is needed in particular to
               avoid spurious Memcheck errors during inferior calls.
               So, we save in old_SP the SP before the change. A change
               of stack pointer is also assumed to have initialised
               this new stack space. For the typical example of an
               inferior call, gdb writes arguments on the stack and
               then changes the stack pointer. As the stack-increase
               tool function might mark the new area as undefined, we
               have to call it at the right moment. */
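            /* Worked example (hypothetical numbers, for illustration
               only): if old_SP is 0x1000 and gdb writes a new SP of
               0x0ff0, then the delta computed below is -0x10 and the
               VG_TRACK calls declare [0x0ff0, 0x1000) as newly
               allocated and initialised stack. */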
            VG_(memset) ((void *) &old_SP, 0, size);
            (*the_low_target.transfer_register) (tid, regno, (void *) &old_SP,
                                                 valgrind_to_gdbserver, size, &mod);
         }

         VG_(memset) (buf, 0, size);
         collect_register (regno, buf);
         (*the_low_target.transfer_register) (tid, regno, buf,
                                              gdbserver_to_valgrind, size, &mod);
         if (mod && VG_(debugLog_getLevel)() > 1) {
            char bufimage [2*size + 1];
            heximage (bufimage, buf, size);
            dlog(2,
                 "stored register %d size %d name %s value %s "
                 "tid %u status %s\n",
                 regno, size, the_low_target.reg_defs[regno].name, bufimage,
                 tid, VG_(name_of_ThreadStatus) (tst->status));
         }
         if (regno == the_low_target.stack_pointer_regno) {
            VG_(memcpy) (&new_SP, buf, size);
            if (old_SP > new_SP) {
               Word delta  = (Word)new_SP - (Word)old_SP;
               dlog(1,
                    "   stack increase by stack pointer changed from %p to %p "
                    "delta %ld\n",
                    (void*) old_SP, (void *) new_SP,
                    delta);
               VG_TRACK( new_mem_stack_w_ECU, new_SP, -delta, 0 );
               VG_TRACK( new_mem_stack,       new_SP, -delta );
               VG_TRACK( post_mem_write, Vg_CoreClientReq, tid,
                         new_SP, -delta);
            }
         }
      }
   }
   else {
      for (regno = 0; regno < the_low_target.num_regs; regno++)
         usr_store_inferior_registers (regno);
   }
}

void valgrind_fetch_registers (int regno)
{
   usr_fetch_inferior_registers (regno);
}

void valgrind_store_registers (int regno)
{
   usr_store_inferior_registers (regno);
}

Bool hostvisibility = False;

int valgrind_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
   const void *sourceaddr = C2v (memaddr);
   dlog(3, "reading memory %p size %d\n", sourceaddr, len);
   if (VG_(am_is_valid_for_client) ((Addr) sourceaddr,
                                    len, VKI_PROT_READ)
       || (hostvisibility
           && VG_(am_is_valid_for_valgrind) ((Addr) sourceaddr,
                                             len, VKI_PROT_READ))) {
      VG_(memcpy) (myaddr, sourceaddr, len);
      return 0;
   } else {
      dlog(1, "error reading memory %p size %d\n", sourceaddr, len);
      return -1;
   }
}

int valgrind_write_memory (CORE_ADDR memaddr,
                           const unsigned char *myaddr, int len)
{
   Bool is_valid_client_memory;
   void *targetaddr = C2v (memaddr);
   dlog(3, "writing memory %p size %d\n", targetaddr, len);
   is_valid_client_memory
      = VG_(am_is_valid_for_client) ((Addr)targetaddr, len, VKI_PROT_WRITE);
   if (is_valid_client_memory
       || (hostvisibility
           && VG_(am_is_valid_for_valgrind) ((Addr) targetaddr,
                                             len, VKI_PROT_READ))) {
      if (len > 0) {
         VG_(memcpy) (targetaddr, myaddr, len);
         if (is_valid_client_memory && VG_(tdict).track_post_mem_write) {
            /* Inform the tool of the post mem write.  Note that we do
               the minimum necessary to avoid complaints from e.g.
               memcheck. The idea is that the debugger should be as
               unintrusive as possible.  So, we do not inform of the pre
               mem write (and in any case, this would cause problems
               with memcheck, which does not like our CorePart in
               pre_mem_write). */
            ThreadState *tst =
               (ThreadState *) inferior_target_data (current_inferior);
            ThreadId tid = tst->tid;
            VG_(tdict).track_post_mem_write( Vg_CoreClientReq, tid,
                                             (Addr) targetaddr, len );
         }
      }
      return 0;
   } else {
      dlog(1, "error writing memory %p size %d\n", targetaddr, len);
      return -1;
   }
}

/* Insert or remove a breakpoint or watchpoint. */
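/* The type argument corresponds to the GDB remote protocol Z/z packet
   kind: '0' software breakpoint, '1' hardware breakpoint, '2' write
   watchpoint, '3' read watchpoint, '4' access watchpoint (as
   interpreted by the switch below). */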
static
int valgrind_point (Bool insert, char type, CORE_ADDR addr, int len)
{
   PointKind kind;
   switch (type) {
   case '0': /* implemented by inserting checks at each instruction in sb */
      kind = software_breakpoint;
      break;
   case '1': /* hw breakpoint, same implementation as sw breakpoint */
      kind = hardware_breakpoint;
      break;
   case '2':
      kind = write_watchpoint;
      break;
   case '3':
      kind = read_watchpoint;
      break;
   case '4':
      kind = access_watchpoint;
      break;
   default:
      vg_assert (0);
   }

   /* Attention: gdbserver convention differs: 0 means ok; 1 means not ok */
   if (VG_(gdbserver_point) (kind, insert, addr, len))
      return 0;
   else
      return 1; /* error or unsupported */
}

const char* valgrind_target_xml (Bool shadow_mode)
{
   return (*the_low_target.target_xml) (shadow_mode);
}

int valgrind_insert_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ True, type, addr, len);
}

int valgrind_remove_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ False, type, addr, len);
}

/* Returns the (platform specific) offset of the lm_modid field in the
   link map struct.
   Stores the offset in *result and returns True if the offset can be
   determined.
   Returns False otherwise, in which case *result must not be used. */
static Bool getplatformoffset (SizeT *result)
{
   static Bool getplatformoffset_called = False;

   static Bool lm_modid_offset_found = False;
   static SizeT lm_modid_offset = 1u << 31; // Rubbish initial value.
   // lm_modid_offset is a magic offset, retrieved using an external program.
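   // The external helper (getoff-<platform>, launched below via
   // VG_(system)) is expected to print "name value" tokens, with the
   // value in hexadecimal; the parsing loop below only looks for a
   // pair of the (assumed) form:
   //    lm_modid_offset <hex offset>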

   if (!getplatformoffset_called) {
      getplatformoffset_called = True;
      const HChar *platform = VG_PLATFORM;
      const HChar *cmdformat = "%s/%s-%s -o %s";
      const HChar *getoff = "getoff";
      HChar outfile[VG_(mkstemp_fullname_bufsz) (VG_(strlen)(getoff))];
      Int fd = VG_(mkstemp) (getoff, outfile);
      if (fd == -1)
         return False;
      HChar cmd[ VG_(strlen)(cmdformat)
                 + VG_(strlen)(VG_(libdir)) - 2
                 + VG_(strlen)(getoff)      - 2
                 + VG_(strlen)(platform)    - 2
                 + VG_(strlen)(outfile)     - 2
                 + 1];
      UInt cmdlen;
      struct vg_stat stat_buf;
      Int ret;

      cmdlen = VG_(snprintf)(cmd, sizeof(cmd),
                             cmdformat,
                             VG_(libdir), getoff, platform, outfile);
      vg_assert (cmdlen == sizeof(cmd) - 1);
      ret = VG_(system) (cmd);
      if (ret != 0 || VG_(debugLog_getLevel)() >= 1)
         VG_(dmsg) ("command %s exit code %d\n", cmd, ret);
      ret = VG_(fstat)( fd, &stat_buf );
      if (ret != 0)
         VG_(dmsg) ("error VG_(fstat) %d %s\n", fd, outfile);
      else {
         HChar *w;
         HChar *ssaveptr;
         HChar *os;
         HChar *str;
         HChar *endptr;

         os = malloc (stat_buf.size+1);
         vg_assert (os);
         ret = VG_(read)(fd, os, stat_buf.size);
         vg_assert(ret == stat_buf.size);
         os[ret] = '\0';
         str = os;
         while ((w = VG_(strtok_r)(str, " \n", &ssaveptr)) != NULL) {
            if (VG_(strcmp) (w, "lm_modid_offset") == 0) {
               w = VG_(strtok_r)(NULL, " \n", &ssaveptr);
               lm_modid_offset = (SizeT) VG_(strtoull16) ( w, &endptr );
               if (endptr == w)
                  VG_(dmsg) ("%s lm_modid_offset unexpected hex value %s\n",
                             cmd, w);
               else
                  lm_modid_offset_found = True;
            } else {
               VG_(dmsg) ("%s produced unexpected %s\n", cmd, w);
            }
            str = NULL; // ensure next VG_(strtok_r) continues the parsing.
         }
         VG_(free) (os);
      }

      VG_(close)(fd);
      ret = VG_(unlink)( outfile );
      if (ret != 0)
         VG_(umsg) ("error: could not unlink %s\n", outfile);
   }

   *result = lm_modid_offset;
   return lm_modid_offset_found;
}

Bool valgrind_get_tls_addr (ThreadState *tst,
                            CORE_ADDR offset,
                            CORE_ADDR lm,
                            CORE_ADDR *tls_addr)
{
   CORE_ADDR **dtv_loc;
   CORE_ADDR *dtv;
   SizeT lm_modid_offset;
   unsigned long int modid;

#define CHECK_DEREF(addr, len, name) \
   if (!VG_(am_is_valid_for_client) ((Addr)(addr), (len), VKI_PROT_READ)) { \
      dlog(0, "get_tls_addr: %s at %p len %lu not addressable\n",       \
           name, (void*)(addr), (unsigned long)(len));                  \
      return False;                                                     \
   }

   *tls_addr = 0;

   if (the_low_target.target_get_dtv == NULL) {
      dlog(1, "low level dtv support not available\n");
      return False;
   }

   if (!getplatformoffset (&lm_modid_offset)) {
      dlog(0, "link_map modid field offset not available\n");
      return False;
   }
   dlog (2, "link_map modid offset %p\n", (void*)lm_modid_offset);
   vg_assert (lm_modid_offset < 0x10000); // let's say

   dtv_loc = (*the_low_target.target_get_dtv)(tst);
   if (dtv_loc == NULL) {
      dlog(0, "low level dtv support returned NULL\n");
      return False;
   }

   CHECK_DEREF(dtv_loc, sizeof(CORE_ADDR), "dtv_loc");
   dtv = *dtv_loc;
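   /* Assumed dtv layout (this matches the glibc dynamic thread vector
      this code was written against, but is not guaranteed for every
      libc): each dtv entry occupies two CORE_ADDRs, entry 0 holds the
      generation counter, and the TLS block pointer of the module with
      id modid is found at dtv[2 * modid]; hence the 2 * modid indexing
      used below. */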

   // Check that we can read at least 2 addresses at the beginning of dtv.
   CHECK_DEREF(dtv, 2*sizeof(CORE_ADDR), "dtv 2 first entries");
   dlog (2, "tid %u dtv %p\n", tst->tid, (void*)dtv);

   // Check we can read the modid
   CHECK_DEREF(lm+lm_modid_offset, sizeof(unsigned long int), "link_map modid");
   modid = *(unsigned long int *)(lm+lm_modid_offset);

   // Check we can access the dtv entry for modid
   CHECK_DEREF(dtv + 2 * modid, sizeof(CORE_ADDR), "dtv[2*modid]");

   // Compute the base address of the tls block.
   *tls_addr = *(dtv + 2 * modid);

#if defined(VGA_mips32) || defined(VGA_mips64)
   if (*tls_addr & 1) {
      /* This means that the computed address is not valid, most probably
         because the given module uses static TLS.
         However, the best we can do is to try to compute the address
         using static TLS. This is what libthread_db does.
         Ref. GLIBC/nptl_db/td_thr_tlsbase.c:td_thr_tlsbase().
      */

      CORE_ADDR tls_offset_addr;
      PtrdiffT tls_offset;

      dlog(1, "computing tls_addr using static TLS\n");

      /* Assumes that tls_offset is placed right before tls_modid.
         To check the assumption, start a gdb on none/tests/tls and do:
         p &((struct link_map*)0x0)->l_tls_modid
         p &((struct link_map*)0x0)->l_tls_offset */
      tls_offset_addr = lm + lm_modid_offset - sizeof(PtrdiffT);

      // Check we can read the tls_offset.
      CHECK_DEREF(tls_offset_addr, sizeof(PtrdiffT), "link_map tls_offset");
      tls_offset = *(PtrdiffT *)(tls_offset_addr);

      /* Following two values represent platform dependent constants
         NO_TLS_OFFSET and FORCED_DYNAMIC_TLS_OFFSET, respectively. */
      if ((tls_offset == -1) || (tls_offset == -2)) {
         dlog(2, "link_map tls_offset is not valid for static TLS\n");
         return False;
      }

      // This calculation is also platform dependent.
      *tls_addr = ((CORE_ADDR)dtv_loc + 2 * sizeof(CORE_ADDR) + tls_offset);
   }
#endif

   // Finally, add tls variable offset to tls block base address.
   *tls_addr += offset;

   return True;

#undef CHECK_DEREF
}

/* returns a pointer to the architecture state corresponding to
   the provided register set: 0 => normal guest registers,
                              1 => shadow1
                              2 => shadow2
*/
VexGuestArchState* get_arch (int set, ThreadState* tst)
{
  switch (set) {
  case 0: return &tst->arch.vex;
  case 1: return &tst->arch.vex_shadow1;
  case 2: return &tst->arch.vex_shadow2;
  default: vg_assert(0);
  }
}

static int non_shadow_num_regs = 0;
static struct reg *non_shadow_reg_defs = NULL;
void initialize_shadow_low(Bool shadow_mode)
{
  if (non_shadow_reg_defs == NULL) {
    non_shadow_reg_defs = the_low_target.reg_defs;
    non_shadow_num_regs = the_low_target.num_regs;
  }

  regcache_invalidate();
  if (the_low_target.reg_defs != non_shadow_reg_defs) {
     free (the_low_target.reg_defs);
  }
  if (shadow_mode) {
    the_low_target.num_regs = 3 * non_shadow_num_regs;
    the_low_target.reg_defs = build_shadow_arch (non_shadow_reg_defs, non_shadow_num_regs);
  } else {
    the_low_target.num_regs = non_shadow_num_regs;
    the_low_target.reg_defs = non_shadow_reg_defs;
  }
  set_register_cache (the_low_target.reg_defs, the_low_target.num_regs);
}

void set_desired_inferior (int use_general)
{
  struct thread_info *found;

  if (use_general == 1) {
     found = (struct thread_info *) find_inferior_id (&all_threads,
                                                      general_thread);
  } else {
     found = NULL;

     /* If we are continuing any (all) thread(s), use step_thread
        to decide which thread to step and/or send the specified
        signal to.  */
     if ((step_thread != 0 && step_thread != -1)
         && (cont_thread == 0 || cont_thread == -1))
        found = (struct thread_info *) find_inferior_id (&all_threads,
                                                         step_thread);

     if (found == NULL)
        found = (struct thread_info *) find_inferior_id (&all_threads,
                                                         cont_thread);
  }

  if (found == NULL)
     current_inferior = (struct thread_info *) all_threads.head;
  else
     current_inferior = found;
  {
     ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
     ThreadId tid = tst->tid;
     dlog(1, "set_desired_inferior use_general %d found %p tid %u lwpid %d\n",
          use_general, found, tid, tst->os_state.lwpid);
  }
}

void* VG_(dmemcpy) ( void *d, const void *s, SizeT sz, Bool *mod )
{
   if (VG_(memcmp) (d, s, sz)) {
      *mod = True;
      return VG_(memcpy) (d, s, sz);
   } else {
      *mod = False;
      return d;
   }
}

void VG_(transfer) (void *valgrind,
                    void *gdbserver,
                    transfer_direction dir,
                    SizeT sz,
                    Bool *mod)
{
   if (dir == valgrind_to_gdbserver)
      VG_(dmemcpy) (gdbserver, valgrind, sz, mod);
   else if (dir == gdbserver_to_valgrind)
      VG_(dmemcpy) (valgrind, gdbserver, sz, mod);
   else
      vg_assert (0);
}
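/* Typical use of VG_(transfer) by an architecture-specific
   transfer_register implementation (illustrative sketch only; the real
   per-architecture code lives in the valgrind-low-*.c files and the
   guest field name below is just an example):
      VG_(transfer) (&vex->guest_xxx, buf, dir, size, mod);
   i.e. the same call handles both directions, and *mod tells the
   caller whether the destination actually changed. */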

void valgrind_initialize_target(void)
{
#if defined(VGA_x86)
   x86_init_architecture(&the_low_target);
#elif defined(VGA_amd64)
   amd64_init_architecture(&the_low_target);
#elif defined(VGA_arm)
   arm_init_architecture(&the_low_target);
#elif defined(VGA_arm64)
   arm64_init_architecture(&the_low_target);
#elif defined(VGA_ppc32)
   ppc32_init_architecture(&the_low_target);
#elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
   ppc64_init_architecture(&the_low_target);
#elif defined(VGA_s390x)
   s390x_init_architecture(&the_low_target);
#elif defined(VGA_mips32)
   mips32_init_architecture(&the_low_target);
#elif defined(VGA_mips64)
   mips64_init_architecture(&the_low_target);
#else
   #error "architecture missing in target.c valgrind_initialize_target"
#endif
}
