
/*--------------------------------------------------------------------*/
/*--- Xen Hypercalls                                 syswrap-xen.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2012 Citrix Systems
      ian.campbell@citrix.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"

#if defined(ENABLE_XEN)

#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_tooliface.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-xen.h"

#include <inttypes.h>

#define PRE(name) static DEFN_PRE_TEMPLATE(xen, name)
#define POST(name) static DEFN_POST_TEMPLATE(xen, name)
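
/* PRE(foo) and POST(foo) expand, via the DEFN_{PRE,POST}_TEMPLATE macros
   from priv_types_n_macros.h, into the pre- and post-hypercall handler
   definitions dispatched to by the syswrap machinery.  A PRE handler
   tells the tool which guest memory the hypercall arguments are read
   from (PRE_MEM_READ) or will be written to (PRE_MEM_WRITE); the
   matching POST handler marks the output buffers as defined once the
   hypercall has completed (POST_MEM_WRITE). */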

static void bad_intf_version ( ThreadId              tid,
                               SyscallArgLayout*     layout,
                               /*MOD*/SyscallArgs*   args,
                               /*OUT*/SyscallStatus* status,
                               /*OUT*/UWord*         flags,
                               const HChar*          hypercall,
                               UWord                 version)
{
   VG_(dmsg)("WARNING: %s version %#lx not supported\n",
             hypercall, version);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}

static void bad_subop ( ThreadId              tid,
                        SyscallArgLayout*     layout,
                        /*MOD*/SyscallArgs*   args,
                        /*OUT*/SyscallStatus* status,
                        /*OUT*/UWord*         flags,
                        const HChar*          hypercall,
                        UWord                 subop)
{
   VG_(dmsg)("WARNING: unhandled %s subop: %lu\n",
             hypercall, subop);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
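
/* These two helpers are the default/fallback cases in the handlers
   below: bad_intf_version() reports an interface version this file does
   not know about, bad_subop() an unknown sub-operation.  In both cases
   the status is forced to VKI_ENOSYS rather than letting the hypercall
   proceed with unchecked arguments. */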

PRE(memory_op)
{
   PRINT("__HYPERVISOR_memory_op ( %lu, %#lx )", ARG1, ARG2);

   switch (ARG1) {

   case VKI_XENMEM_maximum_ram_page:
       /* No inputs */
       break;

   case VKI_XENMEM_maximum_gpfn:
       PRE_MEM_READ("XENMEM_maximum_gpfn domid",
                    (Addr)ARG2, sizeof(vki_xen_domid_t));
       break;

   case VKI_XENMEM_machphys_mfn_list:
   case VKI_XENMEM_machphys_compat_mfn_list: {
       struct vki_xen_machphys_mfn_list *arg =
           (struct vki_xen_machphys_mfn_list *)ARG2;
       PRE_MEM_READ("XENMEM_machphys_mfn_list max_extents",
                    (Addr)&arg->max_extents, sizeof(arg->max_extents));
       PRE_MEM_READ("XENMEM_machphys_mfn_list extent_start",
                    (Addr)&arg->extent_start, sizeof(arg->extent_start));
       break;
   }

   case VKI_XENMEM_set_memory_map: {
      struct vki_xen_foreign_memory_map *arg =
         (struct vki_xen_foreign_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_set_memory_map domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_set_memory_map map",
                   (Addr)&arg->map, sizeof(arg->map));
      break;
   }

   case VKI_XENMEM_memory_map:
   case VKI_XENMEM_machine_memory_map: {
      struct vki_xen_memory_map *arg =
         (struct vki_xen_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_memory_map nr_entries",
                   (Addr)&arg->nr_entries, sizeof(arg->nr_entries));
      break;
   }

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_populate_physmap:
   case VKI_XENMEM_claim_pages: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;
      const HChar *which;

      switch (ARG1) {
      case VKI_XENMEM_increase_reservation:
         which = "XENMEM_increase_reservation";
         break;
      case VKI_XENMEM_decrease_reservation:
         which = "XENMEM_decrease_reservation";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_populate_physmap:
         which = "XENMEM_populate_physmap";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_claim_pages:
         which = "XENMEM_claim_pages";
         break;
      default:
         which = "XENMEM_unknown";
         break;
      }

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_start,
                   sizeof(memory_reservation->extent_start));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->nr_extents,
                   sizeof(memory_reservation->nr_extents));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_order,
                   sizeof(memory_reservation->extent_order));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->mem_flags,
                   sizeof(memory_reservation->mem_flags));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->domid,
                   sizeof(memory_reservation->domid));
      break;
   }

   case VKI_XENMEM_add_to_physmap: {
       struct vki_xen_add_to_physmap *arg =
           (struct vki_xen_add_to_physmap *)ARG2;
       PRE_MEM_READ("XENMEM_add_to_physmap domid",
                    (Addr)&arg->domid, sizeof(arg->domid));
       PRE_MEM_READ("XENMEM_add_to_physmap size",
                    (Addr)&arg->size, sizeof(arg->size));
       PRE_MEM_READ("XENMEM_add_to_physmap space",
                    (Addr)&arg->space, sizeof(arg->space));
       PRE_MEM_READ("XENMEM_add_to_physmap idx",
                    (Addr)&arg->idx, sizeof(arg->idx));
       PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
                    (Addr)&arg->gpfn, sizeof(arg->gpfn));
       break;
   }

   case VKI_XENMEM_remove_from_physmap: {
       struct vki_xen_remove_from_physmap *arg =
           (struct vki_xen_remove_from_physmap *)ARG2;
       PRE_MEM_READ("XENMEM_remove_from_physmap domid",
                    (Addr)&arg->domid, sizeof(arg->domid));
       PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
                    (Addr)&arg->gpfn, sizeof(arg->gpfn));
       break;
   }

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:
      break;

   case VKI_XENMEM_access_op: {
       struct vki_xen_mem_event_op *arg =
            (struct vki_xen_mem_event_op *)ARG2;
       PRE_MEM_READ("XENMEM_access_op domid",
                    (Addr)&arg->domain, sizeof(arg->domain));
       PRE_MEM_READ("XENMEM_access_op op",
                    (Addr)&arg->op, sizeof(arg->op));
       PRE_MEM_READ("XENMEM_access_op gfn",
                    (Addr)&arg->gfn, sizeof(arg->gfn));
       break;
   }
   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_memory_op", ARG1);
      break;
   }
}

PRE(mmuext_op)
{
   PRINT("__HYPERVISOR_mmuext_op ( %#lx, %ld, %#lx, %lu )",
         ARG1, SARG2, ARG3, ARG4);

   struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
   unsigned int i, nr = ARG2;

   for (i=0; i<nr; i++) {
      struct vki_xen_mmuext_op *op = ops + i;
      PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
                   (Addr)&op->cmd, sizeof(op->cmd));
      switch(op->cmd) {
      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_COPY_PAGE:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
                      (Addr)&op->arg1.mfn,
                      sizeof(op->arg1.mfn));
         break;

      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.linear_addr",
                      (Addr)&op->arg1.linear_addr,
                      sizeof(op->arg1.linear_addr));
         break;

      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
         /* None */
         break;
      }

      switch(op->cmd) {
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
                      (Addr)&op->arg2.nr_ents,
                      sizeof(op->arg2.nr_ents));
         break;

      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
         /* How many??? */
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
                      (Addr)&op->arg2.vcpumask,
                      sizeof(op->arg2.vcpumask));
         break;

      case VKI_XEN_MMUEXT_COPY_PAGE:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
                      (Addr)&op->arg2.src_mfn,
                      sizeof(op->arg2.src_mfn));
         break;

      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         /* None */
         break;
      }
   }
}

PRE(xsm_op)
{
   /* XXX assuming flask, only actual XSM right now */
   struct vki_xen_flask_op *op = (struct vki_xen_flask_op *)ARG1;

   PRINT("__HYPERVISOR_xsm_op ( %u )", op->cmd);

   /*
    * Common part of xen_flask_op:
    *    vki_uint32_t cmd;
    *    vki_uint32_t interface_version;
    */
   PRE_MEM_READ("__HYPERVISOR_xsm_op", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t));

   if (!op)
      return;

   switch (op->interface_version) {
   case 0x00000001:
      break;
   default:
      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_xsm_op", op->interface_version);
      return;
   }

#define PRE_XEN_XSM_OP_READ(_xsm_op, _union, _field)            \
   PRE_MEM_READ("FLASK_" #_xsm_op " u." #_union "." #_field,    \
                (Addr)&op->u._union._field,                     \
                sizeof(op->u._union._field))

   switch (op->cmd) {
   case VKI_FLASK_SID_TO_CONTEXT:
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, sid);
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, size);
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, context.p);
      break;
   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_xsm_op", op->cmd);
      break;
   }
#undef PRE_XEN_XSM_OP_READ
}

PRE(sched_op)
{
   PRINT("__HYPERVISOR_sched_op ( %ld, %#lx )", SARG1, ARG2);
   void *arg = (void *)(unsigned long)ARG2;

#define __PRE_XEN_SCHEDOP_READ(_schedop, _type, _field) \
   PRE_MEM_READ("XEN_SCHEDOP_" # _schedop " " #_field,  \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
#define PRE_XEN_SCHEDOP_READ(_schedop, _field)                          \
   __PRE_XEN_SCHEDOP_READ(_schedop, vki_xen_ ## _schedop ## _t, _field)
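
/* For example, PRE_XEN_SCHEDOP_READ(remote_shutdown, reason) expands to
   a PRE_MEM_READ check on ((vki_xen_remote_shutdown_t*)arg)->reason,
   labelled "XEN_SCHEDOP_remote_shutdown reason". */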

   switch (ARG1) {
   case VKI_XEN_SCHEDOP_remote_shutdown:
      PRE_XEN_SCHEDOP_READ(remote_shutdown, domain_id);
      PRE_XEN_SCHEDOP_READ(remote_shutdown, reason);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_sched_op", ARG1);
      break;
   }
#undef __PRE_XEN_SCHEDOP_READ
#undef PRE_XEN_SCHEDOP_READ
}

static void pre_evtchn_op(ThreadId tid,
                          SyscallArgLayout*     layout,
                          /*MOD*/SyscallArgs*   arrghs,
                          /*OUT*/SyscallStatus* status,
                          /*OUT*/UWord*         flags,
                          __vki_u32 cmd, void *arg, int compat)
{
   PRINT("__HYPERVISOR_event_channel_op%s ( %u, %p )",
         compat ? "_compat" : "", cmd, arg);

   switch (cmd) {
   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
                   (Addr)&alloc_unbound->dom, sizeof(alloc_unbound->dom));
      PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
                   (Addr)&alloc_unbound->remote_dom,
                   sizeof(alloc_unbound->remote_dom));
      break;
   }
   default:
      if ( compat )
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op_compat", cmd);
      else
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op", cmd);
      break;
   }
}

PRE(evtchn_op)
{
   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 ARG1, (void *)ARG2, 0);
}

PRE(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
                ARG1, sizeof(*evtchn));

   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 evtchn->cmd, &evtchn->u, 1);
}

PRE(physdev_op)
{
   int cmd = ARG1;

   PRINT("__HYPERVISOR_physdev_op ( %ld, %#lx )", SARG1, ARG2);

#define PRE_XEN_PHYSDEVOP_READ(_op, _field)             \
   PRE_MEM_READ("XEN_PHYSDEVOP_" #_op " ." #_field,     \
                (Addr)&arg->_field,                     \
                sizeof(arg->_field))

   switch (cmd) {
   case VKI_XEN_PHYSDEVOP_map_pirq: {
      struct vki_xen_physdev_map_pirq *arg =
         (struct vki_xen_physdev_map_pirq *)ARG2;

      PRE_XEN_PHYSDEVOP_READ("map_pirq", domid);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", type);

      PRE_XEN_PHYSDEVOP_READ("map_pirq", bus);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", devfn);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", entry_nr);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", table_base);

      switch(arg->type) {
      case VKI_XEN_MAP_PIRQ_TYPE_MSI:
         PRE_XEN_PHYSDEVOP_READ("map_pirq", index);
         break;
      case VKI_XEN_MAP_PIRQ_TYPE_GSI:
         PRE_XEN_PHYSDEVOP_READ("map_pirq", index);
         PRE_XEN_PHYSDEVOP_READ("map_pirq", pirq);
         break;
      case VKI_XEN_MAP_PIRQ_TYPE_MSI_SEG:
         PRE_XEN_PHYSDEVOP_READ("map_pirq", index);
         break;
      case VKI_XEN_MAP_PIRQ_TYPE_MULTI_MSI:
         break;
      }
      break;
   }
   case VKI_XEN_PHYSDEVOP_unmap_pirq: {
      struct vki_xen_physdev_unmap_pirq *arg =
         (struct vki_xen_physdev_unmap_pirq *)ARG2;
      PRE_XEN_PHYSDEVOP_READ("unmap_pirq", domid);
      PRE_XEN_PHYSDEVOP_READ("unmap_pirq", pirq);
      break;
   }
   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_physdev_op", cmd);
   }
#undef PRE_XEN_PHYSDEVOP_READ
}

PRE(xen_version)
{
   PRINT("__HYPERVISOR_xen_version ( %ld, %#lx )", SARG1, ARG2);

   switch (ARG1) {
   case VKI_XENVER_version:
   case VKI_XENVER_extraversion:
   case VKI_XENVER_compile_info:
   case VKI_XENVER_capabilities:
   case VKI_XENVER_changeset:
   case VKI_XENVER_platform_parameters:
   case VKI_XENVER_get_features:
   case VKI_XENVER_pagesize:
   case VKI_XENVER_guest_handle:
   case VKI_XENVER_commandline:
      /* No inputs */
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_xen_version", ARG1);
      break;
   }
}

PRE(grant_table_op)
{
   PRINT("__HYPERVISOR_grant_table_op ( %lu, %#lx, %lu )", ARG1, ARG2, ARG3);
   switch (ARG1) {
   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
                   (Addr)&gst->dom, sizeof(gst->dom));
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
                   (Addr)&gst->nr_frames, sizeof(gst->nr_frames));
      break;
   }
   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_grant_table_op", ARG1);
      break;
   }
}

PRE(sysctl) {
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   PRINT("__HYPERVISOR_sysctl ( %u )", sysctl->cmd);

   /*
    * Common part of xen_sysctl:
    *    uint32_t cmd;
    *    uint32_t interface_version;
    */
   PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t));

   if (!sysctl)
      return;

   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
   case 0x0000000b:
      break;
   default:
      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_sysctl", sysctl->interface_version);
      return;
   }

#define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field)                  \
      PRE_MEM_READ("XEN_SYSCTL_" #_sysctl " u." #_union "." #_field,    \
                   (Addr)&sysctl->u._union._field,                      \
                   sizeof(sysctl->u._union._field))
#define PRE_XEN_SYSCTL_READ(_sysctl, _field) \
      __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
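
/* The two-level macro exists because the name of the union member in
   sysctl->u does not always match the command name (see the versioned
   getdomaininfolist_0000000x members below).  As an illustration,
   PRE_XEN_SYSCTL_READ(readconsole, clear) checks that
   sysctl->u.readconsole.clear is addressable and defined before the
   hypercall is made. */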

   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
       /* These are all unconditionally read */
       PRE_XEN_SYSCTL_READ(readconsole, clear);
       PRE_XEN_SYSCTL_READ(readconsole, incremental);
       PRE_XEN_SYSCTL_READ(readconsole, buffer);
       PRE_XEN_SYSCTL_READ(readconsole, count);

       /* 'index' only read if 'incremental' is nonzero */
       if (sysctl->u.readconsole.incremental)
           PRE_XEN_SYSCTL_READ(readconsole, index);
       break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);
         break;
      case 0x00000009:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);
         break;
      case 0x0000000a:
      case 0x0000000b:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
         break;
      default:
          VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
                    "%"PRIx32" not implemented yet\n",
                    sysctl->interface_version);
          SET_STATUS_Failure(VKI_EINVAL);
          return;
      }
      break;

   case VKI_XEN_SYSCTL_debug_keys:
       PRE_XEN_SYSCTL_READ(debug_keys, keys);
       PRE_XEN_SYSCTL_READ(debug_keys, nr_keys);
       PRE_MEM_READ("XEN_SYSCTL_debug_keys *keys",
                    (Addr)sysctl->u.debug_keys.keys.p,
                    sysctl->u.debug_keys.nr_keys * sizeof(char));
       break;

   case VKI_XEN_SYSCTL_sched_id:
       /* No inputs */
       break;

   case VKI_XEN_SYSCTL_cpupool_op:
      PRE_XEN_SYSCTL_READ(cpupool_op, op);

      switch(sysctl->u.cpupool_op.op) {
      case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
         PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
      }

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
         PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
         PRE_XEN_SYSCTL_READ(cpupool_op, domid);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
         PRE_XEN_SYSCTL_READ(cpupool_op, cpu);

      break;

   case VKI_XEN_SYSCTL_physinfo:
      /* No input params */
      break;

   case VKI_XEN_SYSCTL_topologyinfo:
      PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_sysctl", sysctl->cmd);
      break;
   }
#undef PRE_XEN_SYSCTL_READ
#undef __PRE_XEN_SYSCTL_READ
}

PRE(domctl)
{
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   PRINT("__HYPERVISOR_domctl ( %u ) on dom%d", domctl->cmd, domctl->domain);

   /*
    * Common part of xen_domctl:
    *    vki_uint32_t cmd;
    *    vki_uint32_t interface_version;
    *    vki_xen_domid_t  domain;
    */
   PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
                + sizeof(vki_xen_domid_t));

   if (!domctl)
      return;

   switch (domctl->interface_version)
   {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
      break;
   default:
      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_domctl", domctl->interface_version);
      return;
   }

#define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field)                  \
      PRE_MEM_READ("XEN_DOMCTL_" #_domctl " u." #_union "." #_field,    \
                   (Addr)&domctl->u._union._field,                      \
                   sizeof(domctl->u._union._field))
#define PRE_XEN_DOMCTL_READ(_domctl, _field) \
      __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
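
/* As with the sysctl macros above, the two-level form lets a handler
   name the union member explicitly when it differs from the command
   name; e.g. XEN_DOMCTL_gethvmcontext below reads from
   domctl->u.hvmcontext rather than a member called "gethvmcontext". */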

   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_vcpus:
   case VKI_XEN_DOMCTL_get_address_size:
   case VKI_XEN_DOMCTL_gettscinfo:
   case VKI_XEN_DOMCTL_getdomaininfo:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_resumedomain:
      /* No input fields. */
      break;

   case VKI_XEN_DOMCTL_createdomain:
      PRE_XEN_DOMCTL_READ(createdomain, ssidref);
      PRE_XEN_DOMCTL_READ(createdomain, handle);
      PRE_XEN_DOMCTL_READ(createdomain, flags);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext:
       /* Xen unconditionally reads the 'buffer' pointer */
       __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
       /* Xen only consumes 'size' if 'buffer' is non NULL. A NULL
        * buffer is a request for the required size. */
       if ( domctl->u.hvmcontext.buffer.p )
           __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);
       break;

   case VKI_XEN_DOMCTL_sethvmcontext:
       __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, size);
       __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, buffer);
       PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
                    (Addr)domctl->u.hvmcontext.buffer.p,
                    domctl->u.hvmcontext.size);
       break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
       __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);

       switch (domctl->u.hvmcontext_partial.type) {
       case VKI_HVM_SAVE_CODE(CPU):
           if ( domctl->u.hvmcontext_partial.buffer.p )
                PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
                   (Addr)domctl->u.hvmcontext_partial.buffer.p,
                   VKI_HVM_SAVE_LENGTH(CPU));
           break;
       default:
           bad_subop(tid, layout, arrghs, status, flags,
                         "__HYPERVISOR_domctl_gethvmcontext_partial type",
                         domctl->u.hvmcontext_partial.type);
           break;
       }
       break;

   case VKI_XEN_DOMCTL_max_mem:
      PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
      break;

   case VKI_XEN_DOMCTL_set_address_size:
      __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
      break;

   case VKI_XEN_DOMCTL_test_assign_device:
      __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device, machine_sbdf);
      break;
   case VKI_XEN_DOMCTL_assign_device:
      __PRE_XEN_DOMCTL_READ(assign_device, assign_device, machine_sbdf);
      break;
   case VKI_XEN_DOMCTL_deassign_device:
      __PRE_XEN_DOMCTL_READ(deassign_device, assign_device, machine_sbdf);
      break;

   case VKI_XEN_DOMCTL_settscinfo:
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
      break;

   case VKI_XEN_DOMCTL_irq_permission:
      PRE_XEN_DOMCTL_READ(irq_permission, pirq);
      PRE_XEN_DOMCTL_READ(irq_permission, allow_access);
      break;

   case VKI_XEN_DOMCTL_iomem_permission:
      PRE_XEN_DOMCTL_READ(iomem_permission, first_mfn);
      PRE_XEN_DOMCTL_READ(iomem_permission, nr_mfns);
      PRE_XEN_DOMCTL_READ(iomem_permission, allow_access);
      break;

   case VKI_XEN_DOMCTL_ioport_permission:
      PRE_XEN_DOMCTL_READ(ioport_permission, first_port);
      PRE_XEN_DOMCTL_READ(ioport_permission, nr_ports);
      PRE_XEN_DOMCTL_READ(ioport_permission, allow_access);
      break;

   case VKI_XEN_DOMCTL_hypercall_init:
      PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
      break;

   case VKI_XEN_DOMCTL_settimeoffset:
       PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);
       break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
      break;

   case VKI_XEN_DOMCTL_scheduler_op:
      PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
      PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_RTDS:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.budget);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         }
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuaffinity:
      switch (domctl->interface_version) {
      case 0x00000007:
      case 0x00000008:
      case 0x00000009:
         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);
         break;
      case 0x0000000a:
         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_0000000a, vcpu);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
            __PRE_XEN_DOMCTL_READ(
               getvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
            __PRE_XEN_DOMCTL_READ(
               getvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_setvcpuaffinity:
      switch (domctl->interface_version) {
      case 0x00000007:
      case 0x00000008:
      case 0x00000009:
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);
         PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
                      (Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
                      domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
         break;
      case 0x0000000a:
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, vcpu);
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, flags);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD) {
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
            PRE_MEM_READ(
               "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_hard.bitmap",
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);
         }
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT) {
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);
            PRE_MEM_READ(
               "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_soft.bitmap",
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);
         }
         break;
      }
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      break;
   case VKI_XEN_DOMCTL_setnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.cpumap.bitmap",
                   (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                   domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getvcpucontext:
      __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
      break;

   case VKI_XEN_DOMCTL_setvcpucontext:
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_pin_mem_cacheattr:
      PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, start);
      PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, end);
      PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, type);
      break;

   case VKI_XEN_DOMCTL_get_ext_vcpucontext:
      switch (domctl->interface_version)
      {
      case 0x00000007:
      case 0x00000008:
         __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000008, vcpu);
         break;

      case 0x00000009:
         __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
         break;

      default:
         VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_get_ext_vcpucontext  domctl version %#"
                   PRIx32" not implemented\n", domctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_set_ext_vcpucontext:
       switch (domctl->interface_version)
       {
       case 0x00000007:
       case 0x00000008:
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008, vcpu);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008, size);
#if defined(__i386__) || defined(__x86_64__)
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_eip);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_eip);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_cs);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_cs);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_disables_events);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_disables_events);

           if ( domctl->u.ext_vcpucontext_00000008.size >=
                offsetof(struct vki_xen_domctl_ext_vcpucontext_00000008, mcg_cap) )
               __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                     mcg_cap);
#endif
           break;

       case 0x00000009:
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_eip);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_eip);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_cs);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_cs);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_disables_events);
           __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_disables_events);

           if ( domctl->u.ext_vcpucontext_00000009.size >=
                offsetof(struct vki_xen_domctl_ext_vcpucontext_00000009, caps) )
           {
               __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                     caps);
               __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                     mci_ctl2_bank0);
               __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                     mci_ctl2_bank1);
           }
#endif
           break;

       default:
           VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_set_ext_vcpucontext  domctl version %#"
                     PRIx32" not implemented\n", domctl->interface_version);
           SET_STATUS_Failure(VKI_EINVAL);
           break;
       }
       break;

   case VKI_XEN_DOMCTL_set_cpuid:
      PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
                   (Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
       PRE_XEN_DOMCTL_READ(getpageframeinfo3, num);
       PRE_XEN_DOMCTL_READ(getpageframeinfo3, array.p);
       PRE_MEM_READ("XEN_DOMCTL_getpageframeinfo3 *u.getpageframeinfo3.array.p",
                    (Addr)domctl->u.getpageframeinfo3.array.p,
                    domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
       break;

   case VKI_XEN_DOMCTL_setvcpuextstate:
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, buffer);
      PRE_MEM_READ("XEN_DOMCTL_setvcpuextstate *u.vcpuextstate.buffer.p",
                   (Addr)domctl->u.vcpuextstate.buffer.p,
                   domctl->u.vcpuextstate.size);
      break;

   case VKI_XEN_DOMCTL_getvcpuextstate:
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, buffer);
      break;

   case VKI_XEN_DOMCTL_shadow_op:
       PRE_XEN_DOMCTL_READ(shadow_op, op);

       switch(domctl->u.shadow_op.op)
       {
       case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
       case VKI_XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
           /* No further inputs */
           break;

       case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE:
           PRE_XEN_DOMCTL_READ(shadow_op, mode);
           switch(domctl->u.shadow_op.mode)
           {
           case XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY:
               goto domctl_shadow_op_enable_logdirty;


           default:
               bad_subop(tid, layout, arrghs, status, flags,
                         "__HYPERVISOR_domctl shadowop mode",
                         domctl->u.shadow_op.mode);
               break;
           }

       case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
       domctl_shadow_op_enable_logdirty:
           /* No further inputs */
           break;

       case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
       case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
           PRE_XEN_DOMCTL_READ(shadow_op, dirty_bitmap);
           PRE_XEN_DOMCTL_READ(shadow_op, pages);
           break;

       case VKI_XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
           PRE_XEN_DOMCTL_READ(shadow_op, mb);
           break;

       default:
           bad_subop(tid, layout, arrghs, status, flags,
                     "__HYPERVISOR_domctl shadow(10)",
                     domctl->u.shadow_op.op);
           break;
       }
       break;

   case VKI_XEN_DOMCTL_set_max_evtchn:
      PRE_XEN_DOMCTL_READ(set_max_evtchn, max_port);
      break;

   case VKI_XEN_DOMCTL_cacheflush:
      PRE_XEN_DOMCTL_READ(cacheflush, start_pfn);
      PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);
      break;

   case VKI_XEN_DOMCTL_set_access_required:
      PRE_XEN_DOMCTL_READ(access_required, access_required);
      break;

   case VKI_XEN_DOMCTL_mem_event_op:
      PRE_XEN_DOMCTL_READ(mem_event_op, op);
      PRE_XEN_DOMCTL_READ(mem_event_op, mode);
      break;

   case VKI_XEN_DOMCTL_debug_op:
      PRE_XEN_DOMCTL_READ(debug_op, op);
      PRE_XEN_DOMCTL_READ(debug_op, vcpu);
      break;

   case VKI_XEN_DOMCTL_get_vcpu_msrs:
      __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, vcpu);
      __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, msr_count);
      __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, msrs);
      break;

   case VKI_XEN_DOMCTL_set_vcpu_msrs:
      __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, vcpu);
      __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, msr_count);
      __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, msrs);
      PRE_MEM_READ("XEN_DOMCTL_set_vcpu_msrs *u.vcpu_msrs.msrs.p",
                   (Addr)domctl->u.vcpu_msrs.msrs.p,
                   sizeof(vki_xen_domctl_vcpu_msr_t) *
                   domctl->u.vcpu_msrs.msr_count);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_domctl", domctl->cmd);
      break;
   }
#undef PRE_XEN_DOMCTL_READ
#undef __PRE_XEN_DOMCTL_READ
}

PRE(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

   PRINT("__HYPERVISOR_hvm_op ( %ld, %#lx )", SARG1, ARG2);

#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field)    \
   PRE_MEM_READ("XEN_HVMOP_" # _hvm_op " " #_field,     \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
#define PRE_XEN_HVMOP_READ(_hvm_op, _field)                             \
   __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
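
/* PRE_XEN_HVMOP_READ assumes the argument type is named
   vki_xen_hvm_<op>_t; the set_param/get_param cases below use
   __PRE_XEN_HVMOP_READ directly because they share
   struct vki_xen_hvm_param. */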

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_param:
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);
      break;

   case VKI_XEN_HVMOP_set_pci_intx_level:
      PRE_XEN_HVMOP_READ(set_pci_intx_level, domid);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, domain);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, bus);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, device);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, level);
      break;

   case VKI_XEN_HVMOP_set_isa_irq_level:
      PRE_XEN_HVMOP_READ(set_isa_irq_level, domid);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, level);
      break;

   case VKI_XEN_HVMOP_set_pci_link_route:
      PRE_XEN_HVMOP_READ(set_pci_link_route, domid);
      PRE_XEN_HVMOP_READ(set_pci_link_route, link);
      PRE_XEN_HVMOP_READ(set_pci_link_route, isa_irq);
      break;

   case VKI_XEN_HVMOP_track_dirty_vram: {
      vki_xen_hvm_track_dirty_vram_t *Arg =
         (vki_xen_hvm_track_dirty_vram_t*)ARG2;
      PRE_XEN_HVMOP_READ(track_dirty_vram, domid);
      PRE_XEN_HVMOP_READ(track_dirty_vram, nr);
      if ( Arg->nr ) {
         PRE_XEN_HVMOP_READ(track_dirty_vram, first_pfn);
         PRE_XEN_HVMOP_READ(track_dirty_vram, dirty_bitmap);
      }
      break;
   }

   case VKI_XEN_HVMOP_set_mem_type:
      PRE_XEN_HVMOP_READ(set_mem_type, domid);
      PRE_XEN_HVMOP_READ(set_mem_type, hvmmem_type);
      PRE_XEN_HVMOP_READ(set_mem_type, nr);
      PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);
      break;

   case VKI_XEN_HVMOP_set_mem_access:
      PRE_XEN_HVMOP_READ(set_mem_access, domid);
      PRE_XEN_HVMOP_READ(set_mem_access, hvmmem_access);
      PRE_XEN_HVMOP_READ(set_mem_access, first_pfn);
      /* if default access */
      if ( ((vki_xen_hvm_set_mem_access_t*)arg)->first_pfn != ~0ULL)
         PRE_XEN_HVMOP_READ(set_mem_access, nr);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      PRE_XEN_HVMOP_READ(get_mem_access, domid);
      PRE_XEN_HVMOP_READ(get_mem_access, pfn);

      PRE_MEM_WRITE("XEN_HVMOP_get_mem_access *hvmmem_access",
                    (Addr)&(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access),
                    sizeof(vki_uint16_t));
      break;

   case VKI_XEN_HVMOP_inject_trap:
      PRE_XEN_HVMOP_READ(inject_trap, domid);
      PRE_XEN_HVMOP_READ(inject_trap, vcpuid);
      PRE_XEN_HVMOP_READ(inject_trap, vector);
      PRE_XEN_HVMOP_READ(inject_trap, type);
      PRE_XEN_HVMOP_READ(inject_trap, error_code);
      PRE_XEN_HVMOP_READ(inject_trap, insn_len);
      PRE_XEN_HVMOP_READ(inject_trap, cr2);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_hvm_op", op);
      break;
   }
#undef __PRE_XEN_HVMOP_READ
#undef PRE_XEN_HVMOP_READ
}

PRE(tmem_op)
{
    struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

    PRINT("__HYPERVISOR_tmem_op ( %u )", tmem->cmd);

    /* Common part for xen_tmem_op:
     *    vki_uint32_t cmd;
     */
    PRE_MEM_READ("__HYPERVISOR_tmem_op cmd", ARG1, sizeof(vki_uint32_t));


#define __PRE_XEN_TMEMOP_READ(_tmem, _union, _field)                    \
    PRE_MEM_READ("XEN_tmem_op_" #_tmem " u." #_union "." #_field,       \
                 (Addr)&tmem->u._union._field,                          \
                 sizeof(tmem->u._union._field))
#define PRE_XEN_TMEMOP_READ(_tmem, _field)                              \
    __PRE_XEN_TMEMOP_READ(_tmem, _tmem, _field)

    switch(tmem->cmd) {

    case VKI_XEN_TMEM_control:

        /* Common part for control hypercall:
         *    vki_int32_t pool_id;
         *    vki_uint32_t subop;
         */
        PRE_MEM_READ("__HYPERVISOR_tmem_op pool_id",
                     (Addr)&tmem->pool_id, sizeof(tmem->pool_id));
        PRE_XEN_TMEMOP_READ(ctrl, subop);

        switch (tmem->u.ctrl.subop) {

        case VKI_XEN_TMEMC_save_begin:
            PRE_XEN_TMEMOP_READ(ctrl, cli_id);
            PRE_XEN_TMEMOP_READ(ctrl, arg1);
            PRE_XEN_TMEMOP_READ(ctrl, buf);
            break;

        default:
            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_tmem_op_control", tmem->u.ctrl.subop);
        }

        break;

    default:
        bad_subop(tid, layout, arrghs, status, flags,
                  "__HYPERVISOR_tmem_op", tmem->cmd);
    }

#undef PRE_XEN_TMEMOP_READ
#undef __PRE_XEN_TMEMOP_READ
}
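
/* The POST_* handlers below run after the hypercall has completed; they
   mirror the PRE_* handlers above and use POST_MEM_WRITE to mark the
   buffers the hypervisor has filled in as defined. */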

POST(memory_op)
{
   switch (ARG1) {
   case VKI_XENMEM_maximum_ram_page:
   case VKI_XENMEM_set_memory_map:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_claim_pages:
   case VKI_XENMEM_maximum_gpfn:
   case VKI_XENMEM_remove_from_physmap:
   case VKI_XENMEM_access_op:
      /* No outputs */
      break;
   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_populate_physmap: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;

      POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
                     sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
      break;
   }

   case VKI_XENMEM_machphys_mfn_list:
   case VKI_XENMEM_machphys_compat_mfn_list: {
       struct vki_xen_machphys_mfn_list *arg =
           (struct vki_xen_machphys_mfn_list *)ARG2;
       POST_MEM_WRITE((Addr)&arg->nr_extents, sizeof(arg->nr_extents));
       POST_MEM_WRITE((Addr)arg->extent_start.p,
                      sizeof(vki_xen_pfn_t) * arg->nr_extents);
       break;
   }

   case VKI_XENMEM_memory_map:
   case VKI_XENMEM_machine_memory_map: {
      struct vki_xen_memory_map *arg =
         (struct vki_xen_memory_map *)ARG2;
      POST_MEM_WRITE((Addr)&arg->nr_entries, sizeof(arg->nr_entries));
      POST_MEM_WRITE((Addr)arg->buffer.p,
                     arg->nr_entries * 20 /* size of an e820 entry */);
      break;
   }

   case VKI_XENMEM_add_to_physmap: {
       struct vki_xen_add_to_physmap *arg =
           (struct vki_xen_add_to_physmap *)ARG2;
       if (arg->space == VKI_XENMAPSPACE_gmfn_range)
           POST_MEM_WRITE(ARG2, sizeof(*arg));
       break;
   }
1378
1379   case VKI_XENMEM_get_sharing_freed_pages:
1380   case VKI_XENMEM_get_sharing_shared_pages:
1381       /* No outputs */
1382       break;
1383   }
1384}
1385
1386POST(mmuext_op)
1387{
1388   unsigned int *pdone = (unsigned int *)ARG3;
1389   /* simplistic */
1390   POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));
1391}
1392
1393POST(xsm_op)
1394{
1395   /* XXX assuming flask, only actual XSM right now */
1396   struct vki_xen_flask_op *op = (struct vki_xen_flask_op *)ARG1;
1397
1398   switch (op->interface_version) {
1399   case 0x00000001:
1400      break;
1401   default:
1402      return;
1403   }
1404
1405#define POST_XEN_XSM_OP_WRITE(_xsm_op, _union, _field)        \
1406      POST_MEM_WRITE((Addr)&op->u._union._field,              \
1407                     sizeof(op->u._union._field))
1408
1409   switch (op->cmd) {
1410   case VKI_FLASK_SID_TO_CONTEXT:
1411      POST_XEN_XSM_OP_WRITE(SID_TO_CONTEXT, sid_context, size);
1412      POST_MEM_WRITE((Addr)op->u.sid_context.context.p,
1413                     op->u.sid_context.size);
1414   }
1415}
1416
1417static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)
1418{
1419   switch (cmd) {
1420   case VKI_XEN_EVTCHNOP_alloc_unbound: {
1421      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
1422      POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));
1423      break;
1424   }
1425   }
1426}
1427
1428POST(sched_op)
1429{
1430   switch (ARG1) {
1431   case VKI_XEN_SCHEDOP_remote_shutdown:
1432      /* No outputs */
1433      break;
1434   }
1435}
1436
1437POST(evtchn_op)
1438{
1439   post_evtchn_op(tid, ARG1, (void *)ARG2, 0);
1440}
1441
1442POST(evtchn_op_compat)
1443{
1444   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
1445   post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);
1446}
1447
1448POST(physdev_op)
1449{
1450   int cmd = ARG1;
1451
1452#define POST_XEN_PHYSDEVOP_WRITE(_op, _field)                   \
1453   POST_MEM_WRITE((Addr)&arg->_field, sizeof(arg->_field))
1454
1455   switch (cmd) {
1456   case VKI_XEN_PHYSDEVOP_unmap_pirq:
1457      /* No outputs */
1458      break;
1459
1460   case VKI_XEN_PHYSDEVOP_map_pirq: {
1461      struct vki_xen_physdev_map_pirq *arg =
1462         (struct vki_xen_physdev_map_pirq *)ARG2;
1463      if (arg->type == VKI_XEN_MAP_PIRQ_TYPE_MULTI_MSI)
1464         POST_XEN_PHYSDEVOP_WRITE("map_pirq", entry_nr);
1465      POST_XEN_PHYSDEVOP_WRITE("map_pirq", pirq);
1466      break;
1467   }
1468#undef POST_XEN_PHYSDEVOP_WRITE
1469
1470   default:
1471      break;
1472   }
1473}
1474
POST(xen_version)
{
   switch (ARG1) {
   case VKI_XENVER_version:
      /* No outputs */
      break;
   case VKI_XENVER_extraversion:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));
      break;
   case VKI_XENVER_compile_info:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));
      break;
   case VKI_XENVER_capabilities:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));
      break;
   case VKI_XENVER_changeset:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));
      break;
   case VKI_XENVER_platform_parameters:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));
      break;
   case VKI_XENVER_get_features:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));
      break;
   case VKI_XENVER_pagesize:
      /* No outputs */
      break;
   case VKI_XENVER_guest_handle:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));
      break;
   case VKI_XENVER_commandline:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));
      break;
   }
}

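/* grant_table_op: for GNTTABOP_setup_table the outputs are the status
   field and the frame_list array of nr_frames entries. */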
POST(grant_table_op)
{
   switch (ARG1) {
   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      POST_MEM_WRITE((Addr)&gst->status, sizeof(gst->status));
      POST_MEM_WRITE((Addr)gst->frame_list.p,
                     sizeof(*gst->frame_list.p) * gst->nr_frames);
      break;
   }
   }
}

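/* sysctl: the layout of the argument union depends on
   interface_version, so outputs are only marked for versions this
   wrapper understands; unknown versions are skipped. */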
POST(sysctl)
{
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
   case 0x0000000b:
      break;
   default:
      return;
   }

#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field)        \
      POST_MEM_WRITE((Addr)&sysctl->u._union._field,            \
                     sizeof(sysctl->u._union._field))
#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) \
      __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)

   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
       POST_MEM_WRITE((Addr)sysctl->u.readconsole.buffer.p,
                      sysctl->u.readconsole.count * sizeof(char));
       break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000008.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000008.buffer.p)
                        * sysctl->u.getdomaininfolist_00000008.num_domains);
         break;
      case 0x00000009:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000009.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000009.buffer.p)
                        * sysctl->u.getdomaininfolist_00000009.num_domains);
         break;
      case 0x0000000a:
      case 0x0000000b:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
                        * sysctl->u.getdomaininfolist_0000000a.num_domains);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_sched_id:
       POST_XEN_SYSCTL_WRITE(sched_id, sched_id);
       break;

   case VKI_XEN_SYSCTL_cpupool_op:
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO) {
         POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
         POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
      }
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
      break;

   case VKI_XEN_SYSCTL_physinfo:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
      case 0x00000009: /* Unchanged from version 8 */
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
         break;
      case 0x0000000a:
      case 0x0000000b:
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_topologyinfo:
      POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_core.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
                     sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_socket.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
                     sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_node.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
                     sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
                     sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);
      break;

   /* No outputs */
   case VKI_XEN_SYSCTL_debug_keys:
       break;
   }
#undef POST_XEN_SYSCTL_WRITE
#undef __POST_XEN_SYSCTL_WRITE
}

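/* domctl: like sysctl, the argument union is versioned, so bail out
   early on interface versions we do not know about.  Sub-ops with no
   output fields share a single fall-through list. */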
POST(domctl)
{
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   switch (domctl->interface_version) {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
      break;
   default:
      return;
   }

#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&domctl->u._union._field,               \
                  sizeof(domctl->u._union._field));
#define POST_XEN_DOMCTL_WRITE(_domctl, _field)          \
   __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)

   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_createdomain:
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_mem:
   case VKI_XEN_DOMCTL_setvcpuextstate:
   case VKI_XEN_DOMCTL_set_address_size:
   case VKI_XEN_DOMCTL_test_assign_device:
   case VKI_XEN_DOMCTL_assign_device:
   case VKI_XEN_DOMCTL_deassign_device:
   case VKI_XEN_DOMCTL_settscinfo:
   case VKI_XEN_DOMCTL_irq_permission:
   case VKI_XEN_DOMCTL_iomem_permission:
   case VKI_XEN_DOMCTL_ioport_permission:
   case VKI_XEN_DOMCTL_hypercall_init:
   case VKI_XEN_DOMCTL_setvcpucontext:
   case VKI_XEN_DOMCTL_pin_mem_cacheattr:
   case VKI_XEN_DOMCTL_set_ext_vcpucontext:
   case VKI_XEN_DOMCTL_setnodeaffinity:
   case VKI_XEN_DOMCTL_set_cpuid:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_sethvmcontext:
   case VKI_XEN_DOMCTL_debug_op:
   case VKI_XEN_DOMCTL_set_max_evtchn:
   case VKI_XEN_DOMCTL_cacheflush:
   case VKI_XEN_DOMCTL_resumedomain:
   case VKI_XEN_DOMCTL_set_vcpu_msrs:
   case VKI_XEN_DOMCTL_set_access_required:
      /* No output fields */
      break;

   case VKI_XEN_DOMCTL_max_vcpus:
      POST_XEN_DOMCTL_WRITE(max_vcpus, max);
      break;

   case VKI_XEN_DOMCTL_get_address_size:
      __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
      break;

   case VKI_XEN_DOMCTL_gettscinfo:
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);
      break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext:
       /* Xen unconditionally writes size... */
       __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
       /* ...but only writes to the buffer if it was non-NULL */
       if ( domctl->u.hvmcontext.buffer.p )
           POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
                          sizeof(*domctl->u.hvmcontext.buffer.p)
                          * domctl->u.hvmcontext.size);
       break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
       switch (domctl->u.hvmcontext_partial.type) {
       case VKI_HVM_SAVE_CODE(CPU):
           if ( domctl->u.hvmcontext_partial.buffer.p )
                POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
                   VKI_HVM_SAVE_LENGTH(CPU));
           break;
       }
       break;

   case VKI_XEN_DOMCTL_scheduler_op:
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         case VKI_XEN_SCHEDULER_RTDS:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.budget);
            break;
         }
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuaffinity:
   case VKI_XEN_DOMCTL_setvcpuaffinity: /* Writes back actual result */
      switch (domctl->interface_version) {
      case 0x00000007:
      case 0x00000008:
      case 0x00000009:
         POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
                        domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
         break;
      case 0x0000000a:
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);
      }
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                     domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getdomaininfo:
      switch (domctl->interface_version) {
      case 0x00000007:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);
         break;
      case 0x00000008:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
         break;
      case 0x00000009:
      case 0x0000000a:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_getvcpucontext:
      __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
       POST_MEM_WRITE((Addr)domctl->u.getpageframeinfo3.array.p,
                      domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
       break;

   case VKI_XEN_DOMCTL_get_ext_vcpucontext:
       switch (domctl->interface_version)
       {
       case 0x00000007:
       case 0x00000008:
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008, size);
#if defined(__i386__) || defined(__x86_64__)
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                   syscall32_callback_eip);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                   sysenter_callback_eip);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                   syscall32_callback_cs);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                   sysenter_callback_cs);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                   syscall32_disables_events);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                   sysenter_disables_events);

           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                   mcg_cap);
#endif
           break;

       case 0x00000009:
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                   syscall32_callback_eip);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                   sysenter_callback_eip);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                   syscall32_callback_cs);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                   sysenter_callback_cs);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                   syscall32_disables_events);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                   sysenter_disables_events);

           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                   caps);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                   mci_ctl2_bank0);
           __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                   mci_ctl2_bank1);
#endif
           break;
       }
       break;

   case VKI_XEN_DOMCTL_getvcpuextstate:
      if (domctl->u.vcpuextstate.buffer.p)
         POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
                        domctl->u.vcpuextstate.size);
      break;

   case VKI_XEN_DOMCTL_shadow_op:
       switch(domctl->u.shadow_op.op)
       {
       case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
       case VKI_XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
           /* No outputs */
           break;

       case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
       case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
           POST_XEN_DOMCTL_WRITE(shadow_op, pages);
           POST_XEN_DOMCTL_WRITE(shadow_op, stats.fault_count);
           POST_XEN_DOMCTL_WRITE(shadow_op, stats.dirty_count);
           if(domctl->u.shadow_op.dirty_bitmap.p)
               POST_MEM_WRITE((Addr)domctl->u.shadow_op.dirty_bitmap.p,
                              domctl->u.shadow_op.pages * sizeof(vki_uint8_t));
           break;

       case VKI_XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
           POST_XEN_DOMCTL_WRITE(shadow_op, mb);
           break;

       default:
           break;
       }
       break;
   case VKI_XEN_DOMCTL_get_vcpu_msrs:
      if (domctl->u.vcpu_msrs.msrs.p)
         POST_MEM_WRITE((Addr)domctl->u.vcpu_msrs.msrs.p,
                        sizeof(vki_xen_domctl_vcpu_msr_t) *
                        domctl->u.vcpu_msrs.msr_count);
      break;

   case VKI_XEN_DOMCTL_mem_event_op:
       POST_XEN_DOMCTL_WRITE(mem_event_op, port);
       break;
   }
#undef POST_XEN_DOMCTL_WRITE
#undef __POST_XEN_DOMCTL_WRITE
}

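/* hvm_op: the argument struct type depends on the sub-op, so the
   helper macros cast the generic arg pointer to the matching
   vki_xen_hvm_*_t before marking the output field. */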
POST(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

#define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field)  \
      POST_MEM_WRITE((Addr)&((_type*)arg)->_field,      \
                     sizeof(((_type*)arg)->_field))
#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) \
      __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
   case VKI_XEN_HVMOP_set_pci_intx_level:
   case VKI_XEN_HVMOP_set_isa_irq_level:
   case VKI_XEN_HVMOP_set_pci_link_route:
   case VKI_XEN_HVMOP_set_mem_type:
   case VKI_XEN_HVMOP_set_mem_access:
   case VKI_XEN_HVMOP_inject_trap:
      /* No output parameters */
      break;

   case VKI_XEN_HVMOP_get_param:
      __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access);
      break;
   }
#undef __POST_XEN_HVMOP_WRITE
#undef POST_XEN_HVMOP_WRITE
}

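/* tmem_op: only TMEM_control is wrapped, and its only handled sub-op
   (save_begin) produces no outputs. */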
POST(tmem_op)
{
    struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

    switch(tmem->cmd) {

    case VKI_XEN_TMEM_control:

        switch(tmem->u.ctrl.subop) {
            /* No outputs */
            case VKI_XEN_TMEMC_save_begin:
                break;
        }

        break;
    }
}

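/* A hypercall wrapper is the usual PRE/POST pair plus the number of
   arguments the hypercall consumes, which the dispatchers below
   report back via ARG8. */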
typedef
   struct {
      SyscallTableEntry entry;
      int nr_args;
   }
   XenHypercallTableEntry;

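/* HYPX_ registers a PRE-only wrapper, HYPXY a PRE+POST pair.  The
   table is indexed directly by hypercall number; hypercalls without
   an entry fall back to bad_hyper below. */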
#define HYPX_(const, name, nr_args) \
   [const] = { { vgSysWrap_xen_##name##_before, NULL }, nr_args }
#define HYPXY(const, name, nr_args)                     \
   [const] = { { vgSysWrap_xen_##name##_before,         \
                 vgSysWrap_xen_##name##_after },        \
               nr_args }

static XenHypercallTableEntry hypercall_table[] = {
   //    __VKI_XEN_set_trap_table                                  // 0
   //    __VKI_XEN_mmu_update                                      // 1
   //    __VKI_XEN_set_gdt                                         // 2
   //    __VKI_XEN_stack_switch                                    // 3
   //    __VKI_XEN_set_callbacks                                   // 4

   //    __VKI_XEN_fpu_taskswitch                                  // 5
   //    __VKI_XEN_sched_op_compat                                 // 6
   //    __VKI_XEN_platform_op                                     // 7
   //    __VKI_XEN_set_debugreg                                    // 8
   //    __VKI_XEN_get_debugreg                                    // 9

   //    __VKI_XEN_update_descriptor                               // 10
   //                                                                 // 11
   HYPXY(__VKI_XEN_memory_op,               memory_op,         2), // 12
   //    __VKI_XEN_multicall                                       // 13
   //    __VKI_XEN_update_va_mapping                               // 14

   //    __VKI_XEN_set_timer_op                                    // 15
   HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat,  1), // 16
   HYPXY(__VKI_XEN_xen_version,             xen_version,       2), // 17
   //    __VKI_XEN_console_io                                      // 18
   //    __VKI_XEN_physdev_op_compat                               // 19

   HYPXY(__VKI_XEN_grant_table_op,          grant_table_op,    3), // 20
   //    __VKI_XEN_vm_assist                                       // 21
   //    __VKI_XEN_update_va_mapping_otherdomain                   // 22
   //    __VKI_XEN_iret,                    iret                   // 23
   //    __VKI_XEN_vcpu_op,                 vcpu_op                // 24

   //    __VKI_XEN_set_segment_base                                // 25
   HYPXY(__VKI_XEN_mmuext_op,               mmuext_op,         2), // 26
   HYPXY(__VKI_XEN_xsm_op,                  xsm_op,            1), // 27
   //    __VKI_XEN_nmi_op                                          // 28
   HYPXY(__VKI_XEN_sched_op,                sched_op,          2), // 29

   //    __VKI_XEN_callback_op                                     // 30
   //    __VKI_XEN_xenoprof_op                                     // 31
   HYPXY(__VKI_XEN_event_channel_op,        evtchn_op,         2), // 32
   HYPXY(__VKI_XEN_physdev_op,              physdev_op,        2), // 33
   HYPXY(__VKI_XEN_hvm_op,                  hvm_op,            2), // 34

   HYPXY(__VKI_XEN_sysctl,                  sysctl,            1), // 35
   HYPXY(__VKI_XEN_domctl,                  domctl,            1), // 36
   //    __VKI_XEN_kexec_op                                        // 37
   HYPXY(__VKI_XEN_tmem_op,                 tmem_op,           1), // 38
};

static void bad_before ( ThreadId              tid,
                         SyscallArgLayout*     layout,
                         /*MOD*/SyscallArgs*   args,
                         /*OUT*/SyscallStatus* status,
                         /*OUT*/UWord*         flags )
{
   VG_(dmsg)("WARNING: unhandled hypercall: %s\n",
      VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}

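/* Fallback entry used for any hypercall that has no wrapper. */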
static XenHypercallTableEntry bad_hyper =
{ { bad_before, NULL }, 0 };

static XenHypercallTableEntry* ML_(get_xen_hypercall_entry) ( UInt sysno )
{
   XenHypercallTableEntry *ret = &bad_hyper;

   const UInt hypercall_table_size
      = sizeof(hypercall_table) / sizeof(hypercall_table[0]);

   /* Is it in the contiguous initial section of the table? */
   if (sysno < hypercall_table_size) {
      XenHypercallTableEntry* ent = &hypercall_table[sysno];
      if (ent->entry.before != NULL)
         ret = ent;
   }

   /* Either the wrapper we found, or bad_hyper if there is none */
   return ret;
}

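/* Top-level dispatchers: look up the per-hypercall entry, report the
   number of arguments consumed via ARG8, then invoke the specific
   PRE/POST wrapper (if any). */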
DEFN_PRE_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   vg_assert(ent);
   vg_assert(ent->entry.before);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   (ent->entry.before)( tid, layout, arrghs, status, flags );
}

DEFN_POST_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   vg_assert(ent);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   if (ent->entry.after)
      (ent->entry.after)( tid, arrghs, status );
}

#endif // defined(ENABLE_XEN)
