
/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.      syswrap-x86-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2011-2015 Petr Pavlu
      setup@dagobah.cz

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_x86_solaris)

#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"           // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_tooliface.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-solaris.h"

/* Call f(arg1), but first switch stacks, using 'stack' as the new stack, and
   use 'retaddr' as f's return-to address.  Also, clear all the integer
   registers before entering f. */
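/* A sketch of the alignment arithmetic below, assuming the supplied 'stack'
   value is 16-byte aligned: six words are pushed (three zeroes, arg1,
   retaddr, f) and 'ret' then pops f, so f is entered with
   ESP == stack - 20, i.e. ESP % 16 == 12 -- the same position ESP has after
   a real 'call' issued on an aligned stack. */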
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1)(Addr stack,             /* 4(%esp) */
                                Addr retaddr,           /* 8(%esp) */
                                void (*f)(Word),        /* 12(%esp) */
                                Word arg1);             /* 16(%esp) */
__asm__ (
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movl  %esp, %esi\n"         /* remember old stack pointer */
"   movl  4(%esi), %esp\n"      /* set stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl 16(%esi)\n"           /* arg1 to stack */
"   pushl 8(%esi)\n"            /* retaddr to stack */
"   pushl 12(%esi)\n"           /* f to stack */
"   movl  $0, %eax\n"           /* zero all GP regs */
"   movl  $0, %ebx\n"
"   movl  $0, %ecx\n"
"   movl  $0, %edx\n"
"   movl  $0, %esi\n"
"   movl  $0, %edi\n"
"   movl  $0, %ebp\n"
"   ret\n"                      /* jump to f */
"   ud2\n"                      /* should never get here */
".previous\n"
);

/* This function is called to set up the context of a new Valgrind thread
   (which will run the client code). */
void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   UWord *stack = (UWord*)tst->os_state.valgrind_stack_init_SP;
   UShort cs, ds, ss, es, fs, gs;

   VG_(memset)(uc, 0, sizeof(*uc));
   uc->uc_flags = VKI_UC_CPU | VKI_UC_SIGMASK;
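   /* The flags declare which parts of this context are valid: here the CPU
      registers and the signal mask.  ML_(restore_machine_context)() below
      similarly honours VKI_UC_CPU and VKI_UC_FPU before restoring the
      corresponding state. */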

   /* Start the thread with everything blocked. */
   VG_(sigfillset)(&uc->uc_sigmask);

   /* Set up the stack; it must always be 16-byte aligned before a function
      call is made, i.e. the first parameter is also 16-byte aligned. */
   vg_assert(VG_IS_16_ALIGNED(stack));
   stack -= 1;
   stack[0] = 0; /* bogus return value */
   stack[1] = (UWord)tst; /* the parameter */

   /* Set up the registers. */
   uc->uc_mcontext.gregs[VKI_EIP] = (UWord)ML_(start_thread_NORETURN);
   uc->uc_mcontext.gregs[VKI_UESP] = (UWord)stack;

   /* Copy segment registers. */
   __asm__ __volatile__(
      "movw %%cs, %[cs]\n"
      "movw %%ds, %[ds]\n"
      "movw %%ss, %[ss]\n"
      "movw %%es, %[es]\n"
      "movw %%fs, %[fs]\n"
      "movw %%gs, %[gs]\n"
      : [cs] "=m" (cs), [ds] "=m" (ds), [ss] "=m" (ss), [es] "=m" (es),
        [fs] "=m" (fs), [gs] "=m" (gs));
   uc->uc_mcontext.gregs[VKI_CS] = cs;
   uc->uc_mcontext.gregs[VKI_DS] = ds;
   uc->uc_mcontext.gregs[VKI_SS] = ss;
   uc->uc_mcontext.gregs[VKI_ES] = es;
   uc->uc_mcontext.gregs[VKI_FS] = fs;
   uc->uc_mcontext.gregs[VKI_GS] = gs;
}

/* Architecture-specific part of VG_(save_context). */
void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                               CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
   SizeT i;

   /* CPU */
   /* Common registers */
   uc->uc_mcontext.gregs[VKI_EIP] = tst->arch.vex.guest_EIP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EIP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EIP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EAX] = tst->arch.vex.guest_EAX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EAX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EAX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBX] = tst->arch.vex.guest_EBX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ECX] = tst->arch.vex.guest_ECX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ECX,
            (Addr)&uc->uc_mcontext.gregs[VKI_ECX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDX] = tst->arch.vex.guest_EDX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBP] = tst->arch.vex.guest_EBP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESI] = tst->arch.vex.guest_ESI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESI,
            (Addr)&uc->uc_mcontext.gregs[VKI_ESI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDI] = tst->arch.vex.guest_EDI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDI,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_UESP] = tst->arch.vex.guest_ESP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESP,
            (Addr)&uc->uc_mcontext.gregs[VKI_UESP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESP] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ESP],
            sizeof(UWord));

   /* ERR and TRAPNO */
   uc->uc_mcontext.gregs[VKI_ERR] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ERR],
            sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_TRAPNO] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_TRAPNO],
            sizeof(UWord));

   /* Segment registers */
   /* Note that segment registers are 16 bits wide in VEX, but 32 bits wide
      in the mcontext.  Thus we tell a tool that the lower 16 bits were
      copied and that the higher 16 bits were set (to zero).  (This assumes
      a little-endian architecture.) */
   uc->uc_mcontext.gregs[VKI_CS] = tst->arch.vex.guest_CS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_CS,
            (Addr)&uc->uc_mcontext.gregs[VKI_CS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_CS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_DS] = tst->arch.vex.guest_DS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_DS,
            (Addr)&uc->uc_mcontext.gregs[VKI_DS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_DS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_SS] = tst->arch.vex.guest_SS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_SS,
            (Addr)&uc->uc_mcontext.gregs[VKI_SS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_SS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_ES] = tst->arch.vex.guest_ES;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ES,
            (Addr)&uc->uc_mcontext.gregs[VKI_ES], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_ES]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_FS] = tst->arch.vex.guest_FS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_FS,
            (Addr)&uc->uc_mcontext.gregs[VKI_FS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_FS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_GS] = tst->arch.vex.guest_GS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_GS,
            (Addr)&uc->uc_mcontext.gregs[VKI_GS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_GS]) + 2, sizeof(UShort));

   /* Handle eflags (optimistically make all flags defined). */
   uc->uc_mcontext.gregs[VKI_EFL] =
      LibVEX_GuestX86_get_eflags(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_EFL],
            sizeof(UWord));
   /* The LibVEX_GuestX86_get_eflags() call calculates the eflags value from
      the CC_OP, CC_DEP1, CC_DEP2, CC_NDEP, DFLAG, IDFLAG and ACFLAG guest
      state values.  The *FLAG values represent one-bit information and are
      saved into eflags without loss of precision.  However, when the CC_*
      values are converted into eflags, precision is lost.  What we do here
      is save the unmodified CC_* values into unused ucontext members (the
      'long uc_filler[5]' and 'int fs->__pad[2]' arrays), so that we can
      later restore the context in ML_(restore_machine_context)() without
      any loss of precision.  This imposes a requirement on client programs
      not to use these two members.  Luckily this is never the case in
      Solaris-gate programs and libraries. */
   /* CC_OP and CC_NDEP are always defined, but we don't want to tell a tool
      that we just defined uc_filler[0,1].  This helps if someone uses an
      uninitialized ucontext and tries to read (use) uc_filler[0,1].  Memcheck
      in such a case should detect this error. */
   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
   /* We want the shadow values of CC_DEP1 and CC_DEP2 to be copied as well,
      so we have to tell a tool about this copy. */
   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP1),
            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP2),
            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
   /* Make another copy of eflags, stored in negated form. */
   VKI_UC_GUEST_EFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_EFL];
   /* Calculate a checksum over the stashed values. */
   {
      UInt buf[5];
      UInt checksum;

      buf[0] = VKI_UC_GUEST_CC_OP(uc);
      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
      buf[4] = uc->uc_mcontext.gregs[VKI_EFL];
      checksum = ML_(fletcher32)((UShort*)&buf, sizeof(buf) / sizeof(UShort));
      /* Store the checksum. */
      VKI_UC_GUEST_EFLAGS_CHECKSUM(uc) = checksum;
   }
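   /* ML_(restore_machine_context)() below runs this machinery in reverse:
      it first checks that eflags still matches the negated copy, then
      verifies the Fletcher-32 checksum, and only then trusts the stashed
      CC_* values; otherwise it falls back to the approximate,
      eflags-derived state. */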

   /* FPU */
   /* x87 */
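   /* The 108-byte area is the classic FSAVE image: a 28-byte environment
      (control, status and tag words plus instruction/data pointers)
      followed by eight 10-byte ST registers -- hence the constants 28 and
      10 below. */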
   vg_assert(sizeof(fs->state) == 108);
   LibVEX_GuestX86_get_x87(&tst->arch.vex, (UChar*)&fs->state);

   /* Flags and control words */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->state, 28);
   /* ST registers */
   for (i = 0; i < 8; i++) {
      Addr addr = (Addr)&fs->state + 28 + i * 10;
      /* x87 uses 80-bit FP registers, but VEX uses only 64-bit registers,
         so we have to lie here. :< */
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr, sizeof(ULong));
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr + 8, sizeof(UShort));
   }

   /* Status word (sw) at exception */
   fs->status = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));

   /* SSE */
   fs->mxcsr = LibVEX_GuestX86_get_mxcsr(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));

   /* MXCSR at exception */
   fs->xstatus = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
            sizeof(fs->xstatus));

   /* XMM registers */
#define COPY_OUT_XMM(dest, src) \
   do {                         \
      dest._l[0] = src[0];      \
      dest._l[1] = src[1];      \
      dest._l[2] = src[2];      \
      dest._l[3] = src[3];      \
   } while (0)
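   /* VEX stores each guest XMM register as a U128, i.e. four 32-bit words,
      which is why the macro above copies lane by lane. */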
   COPY_OUT_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM0), (Addr)&fs->xmm[0], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM1), (Addr)&fs->xmm[1], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM2), (Addr)&fs->xmm[2], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM3), (Addr)&fs->xmm[3], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM4), (Addr)&fs->xmm[4], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM5), (Addr)&fs->xmm[5], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM6), (Addr)&fs->xmm[6], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM7), (Addr)&fs->xmm[7], sizeof(U128));
#undef COPY_OUT_XMM
}

/* Architecture-specific part of VG_(restore_context). */
void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                                  CorePart part, Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;

   /* CPU */
   if (uc->uc_flags & VKI_UC_CPU) {
      /* Common registers */
      tst->arch.vex.guest_EIP = uc->uc_mcontext.gregs[VKI_EIP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EIP], OFFSET_x86_EIP,
               sizeof(UWord));
      tst->arch.vex.guest_EAX = uc->uc_mcontext.gregs[VKI_EAX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EAX], OFFSET_x86_EAX,
               sizeof(UWord));
      tst->arch.vex.guest_EBX = uc->uc_mcontext.gregs[VKI_EBX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBX], OFFSET_x86_EBX,
               sizeof(UWord));
      tst->arch.vex.guest_ECX = uc->uc_mcontext.gregs[VKI_ECX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ECX], OFFSET_x86_ECX,
               sizeof(UWord));
      tst->arch.vex.guest_EDX = uc->uc_mcontext.gregs[VKI_EDX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDX], OFFSET_x86_EDX,
               sizeof(UWord));
      tst->arch.vex.guest_EBP = uc->uc_mcontext.gregs[VKI_EBP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBP], OFFSET_x86_EBP,
               sizeof(UWord));
      tst->arch.vex.guest_ESI = uc->uc_mcontext.gregs[VKI_ESI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ESI], OFFSET_x86_ESI,
               sizeof(UWord));
      tst->arch.vex.guest_EDI = uc->uc_mcontext.gregs[VKI_EDI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDI], OFFSET_x86_EDI,
               sizeof(UWord));
      tst->arch.vex.guest_ESP = uc->uc_mcontext.gregs[VKI_UESP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_UESP], OFFSET_x86_ESP,
               sizeof(UWord));

      if (esp_is_thrptr) {
         /* The thrptr value is passed by libc to the kernel in the otherwise
            unused ESP field.  This is used when a new thread is created. */
         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_ESP])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_ESP], sizeof(UWord));
         if (uc->uc_mcontext.gregs[VKI_ESP]) {
            tst->os_state.thrptr = uc->uc_mcontext.gregs[VKI_ESP];
            ML_(update_gdt_lwpgs)(tid);
         }
      }

      /* Ignore ERR and TRAPNO. */

      /* Segment registers */
      tst->arch.vex.guest_CS = uc->uc_mcontext.gregs[VKI_CS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_CS], OFFSET_x86_CS,
               sizeof(UShort));
      tst->arch.vex.guest_DS = uc->uc_mcontext.gregs[VKI_DS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_DS], OFFSET_x86_DS,
               sizeof(UShort));
      tst->arch.vex.guest_SS = uc->uc_mcontext.gregs[VKI_SS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_SS], OFFSET_x86_SS,
               sizeof(UShort));
      tst->arch.vex.guest_ES = uc->uc_mcontext.gregs[VKI_ES];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ES], OFFSET_x86_ES,
               sizeof(UShort));
      tst->arch.vex.guest_FS = uc->uc_mcontext.gregs[VKI_FS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_FS], OFFSET_x86_FS,
               sizeof(UShort));
      tst->arch.vex.guest_GS = uc->uc_mcontext.gregs[VKI_GS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_GS], OFFSET_x86_GS,
               sizeof(UShort));

      /* Eflags */
      {
         UInt eflags;
         UInt orig_eflags;
         UInt new_eflags;
         Bool ok_restore = False;

         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_EFL])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_EFL], sizeof(UWord));
         eflags = uc->uc_mcontext.gregs[VKI_EFL];
         orig_eflags = LibVEX_GuestX86_get_eflags(&tst->arch.vex);
         new_eflags = eflags;
         /* The kernel does not allow the ID flag to be changed via the
            setcontext call, so do the same here. */
         if (orig_eflags & VKI_EFLAGS_ID_BIT)
            new_eflags |= VKI_EFLAGS_ID_BIT;
         else
            new_eflags &= ~VKI_EFLAGS_ID_BIT;
         LibVEX_GuestX86_put_eflags(new_eflags, &tst->arch.vex);
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP1), sizeof(UWord));
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP2), sizeof(UWord));

         /* Check if this context was created by us in VG_(save_context).  In
            that case, try to restore the CC_OP, CC_DEP1, CC_DEP2 and CC_NDEP
            values which we previously stashed into unused members of the
            context. */
         if (eflags != ~VKI_UC_GUEST_EFLAGS_NEG(uc)) {
            VG_(debugLog)(1, "syswrap-solaris",
                             "The eflags value was restored from an "
                             "explicitly set value in thread %u.\n", tid);
            ok_restore = True;
         }
         else {
            UInt buf[5];
            UInt checksum;

            buf[0] = VKI_UC_GUEST_CC_OP(uc);
            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
            buf[4] = eflags;
            checksum = ML_(fletcher32)((UShort*)&buf,
                                       sizeof(buf) / sizeof(UShort));
            if (checksum == VKI_UC_GUEST_EFLAGS_CHECKSUM(uc)) {
               /* Check ok, the full restoration is possible. */
               VG_(debugLog)(1, "syswrap-solaris",
                                "The CC_* guest state values were fully "
                                "restored in thread %u.\n", tid);
               ok_restore = True;

               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP1),
                        sizeof(UWord));
               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP2),
                        sizeof(UWord));
            }
         }

         if (!ok_restore)
            VG_(debugLog)(1, "syswrap-solaris",
                             "Cannot fully restore the CC_* guest state "
                             "values, using approximate eflags in thread "
                             "%u.\n", tid);
      }
   }

   if (uc->uc_flags & VKI_UC_FPU) {
      /* FPU */
      VexEmNote note;
      SizeT i;

      /* x87 */
      /* Flags and control words */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..x87_state)",
               (Addr)&fs->state, 28);
      /* ST registers */
      for (i = 0; i < 8; i++) {
         Addr addr = (Addr)&fs->state + 28 + i * 10;
         VG_TRACK(copy_mem_to_reg, part, tid, addr,
                  offsetof(VexGuestX86State, guest_FPREG[i]), sizeof(ULong));
      }
      note = LibVEX_GuestX86_put_x87((UChar*)&fs->state, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring x87 state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));

      /* SSE */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
      note = LibVEX_GuestX86_put_mxcsr(fs->mxcsr, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring mxcsr state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));
      /* XMM registers */
#define COPY_IN_XMM(src, dest) \
      do {                     \
         dest[0] = src._l[0];  \
         dest[1] = src._l[1];  \
         dest[2] = src._l[2];  \
         dest[3] = src._l[3];  \
      } while (0)
      COPY_IN_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
               offsetof(VexGuestX86State, guest_XMM0), sizeof(U128));
      COPY_IN_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
               offsetof(VexGuestX86State, guest_XMM1), sizeof(U128));
      COPY_IN_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
               offsetof(VexGuestX86State, guest_XMM2), sizeof(U128));
      COPY_IN_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
               offsetof(VexGuestX86State, guest_XMM3), sizeof(U128));
      COPY_IN_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
               offsetof(VexGuestX86State, guest_XMM4), sizeof(U128));
      COPY_IN_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
               offsetof(VexGuestX86State, guest_XMM5), sizeof(U128));
      COPY_IN_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
               offsetof(VexGuestX86State, guest_XMM6), sizeof(U128));
      COPY_IN_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
               offsetof(VexGuestX86State, guest_XMM7), sizeof(U128));
#undef COPY_IN_XMM
   }
}

/* Allocate GDT for a given thread. */
void ML_(setup_gdt)(VexGuestX86State *vex)
{
   Addr gdt = (Addr)VG_(calloc)("syswrap-solaris-x86.gdt",
                                VEX_GUEST_X86_GDT_NENT,
                                sizeof(VexGuestX86SegDescr));
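   /* VG_(calloc) zero-fills the allocation, so all VEX_GUEST_X86_GDT_NENT
      descriptors start out as all-zeroes, i.e. not present. */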
   vex->guest_GDT = gdt;
}

/* Deallocate GDT for a given thread. */
void ML_(cleanup_gdt)(VexGuestX86State *vex)
{
   if (!vex->guest_GDT)
      return;
   VG_(free)((void*)vex->guest_GDT);
   vex->guest_GDT = 0;
}

/* For a given thread, update the LWPGS descriptor in the thread's GDT
   according to the thread pointer. */
void ML_(update_gdt_lwpgs)(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr base = tst->os_state.thrptr;
   VexGuestX86SegDescr *gdt = (VexGuestX86SegDescr*)tst->arch.vex.guest_GDT;
   VexGuestX86SegDescr desc;

   vg_assert(gdt);

   VG_(memset)(&desc, 0, sizeof(desc));
   if (base) {
      desc.LdtEnt.Bits.LimitLow = -1;
      desc.LdtEnt.Bits.LimitHi = -1;
      desc.LdtEnt.Bits.BaseLow = base & 0xffff;
      desc.LdtEnt.Bits.BaseMid = (base >> 16) & 0xff;
      desc.LdtEnt.Bits.BaseHi = (base >> 24) & 0xff;
      desc.LdtEnt.Bits.Pres = 1;
      desc.LdtEnt.Bits.Dpl = 3; /* SEL_UPL */
      desc.LdtEnt.Bits.Type = 19; /* SDT_MEMRWA */
      desc.LdtEnt.Bits.Granularity = 1; /* SDP_PAGES */
      desc.LdtEnt.Bits.Default_Big = 1; /* SDP_OP32 */
   }
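   /* The descriptor built above is a present, ring-3 (SEL_UPL), read/write
      data segment (SDT_MEMRWA) with base = thrptr and a 4GB limit (all-ones
      limit with page granularity); its only real job is to make the thread
      pointer reachable through %gs. */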

   gdt[VKI_GDT_LWPGS] = desc;

   /* Write %gs. */
   tst->arch.vex.guest_GS = VKI_LWPGS_SEL;
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_GS,
            sizeof(UShort));
}


/* ---------------------------------------------------------------------
   PRE/POST wrappers for x86/Solaris-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(x86_solaris, name)
#define POST(name)      DEFN_POST_TEMPLATE(x86_solaris, name)

/* implementation */

PRE(sys_fstatat64)
{
   /* int fstatat64(int fildes, const char *path, struct stat64 *buf,
                    int flag); */
   PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx, %ld )", SARG1, ARG2,
         (HChar*)ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "fstatat64", int, fildes, const char *, path,
                 struct stat64 *, buf, int, flag);
   if (ARG2)
      PRE_MEM_RASCIIZ("fstatat64(path)", ARG2);
   PRE_MEM_WRITE("fstatat64(buf)", ARG3, sizeof(struct vki_stat64));

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD &&
       !ML_(fd_allowed)(ARG1, "fstatat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatat64)
{
   POST_MEM_WRITE(ARG3, sizeof(struct vki_stat64));
}

PRE(sys_openat64)
{
   /* int openat64(int fildes, const char *filename, int flags);
      int openat64(int fildes, const char *filename, int flags, mode_t mode);
    */
   *flags |= SfMayBlock;

   if (ARG3 & VKI_O_CREAT) {
      /* 4-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld, %ld )", SARG1, ARG2,
            (HChar*)ARG2, SARG3, SARG4);
      PRE_REG_READ4(long, "openat64", int, fildes, const char *, filename,
                    int, flags, vki_mode_t, mode);
   }
   else {
      /* 3-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld )", SARG1, ARG2, (HChar*)ARG2,
            SARG3);
      PRE_REG_READ3(long, "openat64", int, fildes, const char *, filename,
                    int, flags);
   }

   PRE_MEM_RASCIIZ("openat64(filename)", ARG2);

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD && !ML_(fd_allowed)(ARG1, "openat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_openat64)
{
   if (!ML_(fd_allowed)(RES, "openat64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
}

PRE(sys_llseek32)
{
   /* offset_t llseek(int fildes, offset_t offset, int whence); */
   PRINT("sys_llseek32 ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "llseek", int, fildes, vki_u32, offset_low,
                 vki_u32, offset_high, int, whence);
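   /* The 64-bit offset is split across two 32-bit arguments, the low half
      in ARG2 and the high half in ARG3, per the offset_low/offset_high
      naming above. */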

   /* Stay sane. */
   if (!ML_(fd_allowed)(ARG1, "llseek", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_mmap64)
{
   /* void *mmap64(void *addr, size_t len, int prot, int flags,
                   int fildes, uint32_t offlo, uint32_t offhi); */
   /* Note that this wrapper assumes a little-endian architecture; offlo and
      offhi would have to be swapped on a big-endian architecture. */
#if !defined(VG_LITTLEENDIAN)
#error "Unexpected endianness."
#endif /* !VG_LITTLEENDIAN */

   SysRes r;
   ULong u;
   Off64T offset;

   /* Stay sane. */
   vg_assert(VKI_PAGE_SIZE == 4096);
   vg_assert(sizeof(u) == sizeof(offset));

   PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx, %#lx )",
         ARG1, ARG2, ARG3, ARG4, SARG5, ARG6, ARG7);
   PRE_REG_READ7(long, "mmap", void *, start, vki_size_t, length,
                 int, prot, int, flags, int, fd, uint32_t, offlo,
                 uint32_t, offhi);

   /* Together, the offlo and offhi values can represent a negative offset.
      Make sure it is passed correctly to the generic mmap wrapper. */
   u = ((ULong)ARG7 << 32) + ARG6;
   offset = *(Off64T*)&u;
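   /* A worked example of the reconstruction: offhi = 0xffffffff and
      offlo = 0xfffff000 combine to u = 0xfffffffffffff000, which
      reinterpreted as a signed Off64T is the offset -4096. */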

   r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
   SET_STATUS_from_SysRes(r);
}

PRE(sys_stat64)
{
   /* int stat64(const char *path, struct stat64 *buf); */
   PRINT("sys_stat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "stat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("stat64(path)", ARG1);
   PRE_MEM_WRITE("stat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_stat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_lstat64)
{
   /* int lstat64(const char *path, struct stat64 *buf); */
   PRINT("sys_lstat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "lstat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("lstat64(path)", ARG1);
   PRE_MEM_WRITE("lstat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_lstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_fstat64)
{
   /* int fstat64(int fildes, struct stat64 *buf); */
   PRINT("sys_fstat64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstat64", int, fildes, struct stat64 *, buf);
   PRE_MEM_WRITE("fstat64(buf)", ARG2, sizeof(struct vki_stat64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

static void do_statvfs64_post(struct vki_statvfs64 *stats, ThreadId tid)
{
   POST_FIELD_WRITE(stats->f_bsize);
   POST_FIELD_WRITE(stats->f_frsize);
   POST_FIELD_WRITE(stats->f_blocks);
   POST_FIELD_WRITE(stats->f_bfree);
   POST_FIELD_WRITE(stats->f_bavail);
   POST_FIELD_WRITE(stats->f_files);
   POST_FIELD_WRITE(stats->f_ffree);
   POST_FIELD_WRITE(stats->f_favail);
   POST_FIELD_WRITE(stats->f_fsid);
   POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
   POST_FIELD_WRITE(stats->f_flag);
   POST_FIELD_WRITE(stats->f_namemax);
   POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
}

PRE(sys_statvfs64)
{
   /* int statvfs64(const char *path, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_statvfs64 ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
   PRE_REG_READ2(long, "statvfs64", const char *, path,
                 struct vki_statvfs64 *, buf);
   PRE_MEM_RASCIIZ("statvfs64(path)", ARG1);
   PRE_MEM_WRITE("statvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));
}

POST(sys_statvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_fstatvfs64)
{
   /* int fstatvfs64(int fd, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_fstatvfs64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstatvfs64", int, fd, struct vki_statvfs64 *, buf);
   PRE_MEM_WRITE("fstatvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstatvfs64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_setrlimit64)
{
   /* int setrlimit64(int resource, struct rlimit64 *rlim); */
   struct vki_rlimit64 *limit = (struct vki_rlimit64 *)ARG2;
   PRINT("sys_setrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "setrlimit64", int, resource, struct rlimit64 *, rlim);
   PRE_MEM_READ("setrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));

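   /* RLIMIT_NOFILE, RLIMIT_DATA and RLIMIT_STACK are virtualised by
      Valgrind and handled below without reaching the kernel; any other
      resource falls through to the real syscall. */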
   if (limit && limit->rlim_cur > limit->rlim_max)
      SET_STATUS_Failure(VKI_EINVAL);
   else if (ARG1 == VKI_RLIMIT_NOFILE) {
      if (limit->rlim_cur > VG_(fd_hard_limit) ||
          limit->rlim_max != VG_(fd_hard_limit)) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(fd_soft_limit) = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_DATA) {
      if (limit->rlim_cur > VG_(client_rlimit_data).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_data).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(client_rlimit_data).rlim_max = limit->rlim_max;
         VG_(client_rlimit_data).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_STACK && tid == 1) {
      if (limit->rlim_cur > VG_(client_rlimit_stack).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_stack).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         /* Change the value of client_stack_szB to the rlim_cur value but
            only if it is smaller than the size of the allocated stack for the
            client. */
         if (limit->rlim_cur <= VG_(clstk_max_size))
            VG_(threads)[tid].client_stack_szB = limit->rlim_cur;

         VG_(client_rlimit_stack).rlim_max = limit->rlim_max;
         VG_(client_rlimit_stack).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
}

PRE(sys_getrlimit64)
{
   /* int getrlimit64(int resource, struct rlimit64 *rlim); */
   PRINT("sys_getrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "getrlimit64",
                 int, resource, struct rlimit64 *, rlim);
   PRE_MEM_WRITE("getrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));
}

POST(sys_getrlimit64)
{
   /* Based on common_post_getrlimit() from syswrap-generic.c. */
   struct vki_rlimit64 *rlim = (struct vki_rlimit64*)ARG2;

   POST_MEM_WRITE(ARG2, sizeof(struct vki_rlimit64));

   switch (ARG1 /*resource*/) {
   case VKI_RLIMIT_NOFILE:
      rlim->rlim_cur = VG_(fd_soft_limit);
      rlim->rlim_max = VG_(fd_hard_limit);
      break;
   case VKI_RLIMIT_DATA:
      rlim->rlim_cur = VG_(client_rlimit_data).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_data).rlim_max;
      break;
   case VKI_RLIMIT_STACK:
      rlim->rlim_cur = VG_(client_rlimit_stack).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_stack).rlim_max;
      break;
   }
}

PRE(sys_pread64)
{
   /* ssize32_t pread64(int fd, void *buf, size32_t count,
                        uint32_t offset_1, uint32_t offset_2);
    */
   *flags |= SfMayBlock;
   PRINT("sys_pread64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pread64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_WRITE("pread64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pread64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_pread64)
{
   POST_MEM_WRITE(ARG2, RES);
}

PRE(sys_pwrite64)
{
   /* ssize32_t pwrite64(int fd, void *buf, size32_t count,
                         uint32_t offset_1, uint32_t offset_2);
    */
   *flags |= SfMayBlock;
   PRINT("sys_pwrite64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pwrite64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_READ("pwrite64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pwrite64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_open64)
{
   /* int open64(const char *filename, int flags);
      int open64(const char *filename, int flags, mode_t mode); */
   *flags |= SfMayBlock;

   if (ARG2 & VKI_O_CREAT) {
      /* 3-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx, %ld )", ARG1, (HChar*)ARG1, ARG2,
            SARG3);
      PRE_REG_READ3(long, "open64", const char *, filename, int, flags,
                    vki_mode_t, mode);
   }
   else {
      /* 2-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
      PRE_REG_READ2(long, "open64", const char *, filename, int, flags);
   }
   PRE_MEM_RASCIIZ("open64(filename)", ARG1);
}

POST(sys_open64)
{
   if (!ML_(fd_allowed)(RES, "open64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG1);
}

#undef PRE
#undef POST

#endif // defined(VGP_x86_solaris)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/
