
/*--------------------------------------------------------------------*/
/*--- Support for doing system calls.       syscall-amd64-darwin.S ---*/
/*--------------------------------------------------------------------*/

/*
  This file is part of Valgrind, a dynamic binary instrumentation
  framework.

  Copyright (C) 2000-2013 Julian Seward
     jseward@acm.org

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_amd64_darwin)

#include "pub_core_basics_asm.h"
#include "pub_core_vkiscnums_asm.h"
#include "libvex_guest_offsets.h"

/*----------------------------------------------------------------*/
/*
	Perform a syscall for the client.  This will run a syscall
	with the client's specific per-thread signal mask.

	The structure of this function is such that, if the syscall is
	interrupted by a signal, we can determine exactly what
	execution state we were in with respect to the execution of
	the syscall by examining the value of %rip in the signal
	handler.  This means that we can always do the appropriate
	thing to precisely emulate the kernel's signal/syscall
	interactions.

	The syscall number is taken from the argument, even though it
	should also be in guest_state->guest_RAX.  The syscall result
	is written back to guest_state->guest_RAX on completion.

	Returns 0 if the syscall was successfully called (even if the
	syscall itself failed), or a negative error code if one of the
	sigprocmasks failed (there's no way to determine which one
	failed).

	VG_(fixup_guest_state_after_syscall_interrupted) does the
	thread state fixup in the case where we were interrupted by a
	signal.

	Prototype:

	Int ML_(do_syscall_for_client_WRK)(
	                          Int syscallno,		// rdi
				  void* guest_state,		// rsi
				  const vki_sigset_t *sysmask,	// rdx
				  const vki_sigset_t *postmask,	// rcx
				  Int sigsetSzB)		// r8

        Note that sigsetSzB is totally ignored (and irrelevant).
*/

/* from vki_arch.h */
#define VKI_SIG_SETMASK	3

/* DO_SYSCALL MACH|MDEP|UNIX */
#define MACH 1
#define MDEP 2
#define UNIX 3
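
/* DO_SYSCALL is expanded once per syscall class below (UNIX, MACH and
   MDEP).  The class number is substituted for $0, so each expansion
   gets its own numbered L_<class>_n labels; their addresses are
   exported at the bottom of this file so that
   VG_(fixup_guest_state_after_syscall_interrupted) can tell which
   phase of the syscall was interrupted. */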

.macro DO_SYSCALL
	/* save callee-saved %rbp, then spill the incoming arguments */
	pushq	%rbp
	movq	%rsp, %rbp
	// stack is now aligned
	pushq	%rdi  // -8(%rbp)   syscallno
	pushq	%rsi  // -16(%rbp)  guest_state
	pushq	%rdx  // -24(%rbp)  sysmask
	pushq	%rcx  // -32(%rbp)  postmask
	pushq	%r8   // -40(%rbp)  sigsetSzB
	// stack is now aligned

L_$0_1:	/* Even though we can't take a signal until the sigprocmask completes,
	   start the range early.
	   If rip is in the range [1,2), the syscall hasn't been started yet */

	/* Set the signal mask which should be current during the syscall. */
	/* GrP fixme signals
           DDD: JRS fixme: use __NR___pthread_sigmask, not __NR_rt_sigprocmask
	movq	$__NR_rt_sigprocmask, %rax	// syscall #
	movq	$VKI_SIG_SETMASK, %rdi		// how
	movq	-24(%rbp), %rsi			// sysmask
	movq	-32(%rbp), %rdx			// postmask
	movq	-40(%rbp), %r10			// sigsetSzB in r10 not rcx
	DDD: fixme return address
	syscall

	jnc	7f	// sigprocmask failed
	*/

	/* OK, that worked.  Now do the syscall proper. */

	/* 6 register parameters */
	movq	-16(%rbp), %r11	/* r11 = VexGuestAMD64State * */
	movq	OFFSET_amd64_RDI(%r11), %rdi
	movq	OFFSET_amd64_RSI(%r11), %rsi
	movq	OFFSET_amd64_RDX(%r11), %rdx
	movq	OFFSET_amd64_RCX(%r11), %r10 /* rcx is passed in r10 instead
	                                        (syscall clobbers rcx) */
	movq	OFFSET_amd64_R8(%r11), %r8
	movq	OFFSET_amd64_R9(%r11), %r9
	/* 2 stack parameters plus return address (ignored by syscall) */
	movq	OFFSET_amd64_RSP(%r11), %r11 /* r11 = simulated RSP */
	movq	16(%r11), %rax
	pushq	%rax
	movq	8(%r11), %rax
	pushq	%rax
	/* stack is currently aligned - return address misaligns */
	movq	0(%r11), %rax
	pushq	%rax
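	/* The three words just pushed reproduce the client's own stack
	   layout: return address at 0(%rsp), arg 7 at 8(%rsp) and arg 8
	   at 16(%rsp), so stack-passed arguments appear to the kernel
	   exactly as the client laid them out. */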
	/* syscallno */
	movq	-8(%rbp), %rax

	/* If rip==2, then the syscall was either just about
	   to start, or was interrupted and the kernel was
	   restarting it. */
L_$0_2:	syscall
L_$0_3:	/* In the range [3, 4), the syscall result is in %rax,
	   but hasn't been committed to RAX. */

	/* stack contents: 3 words for syscall above, plus our prologue */
	setc	0(%rsp)	/* stash returned carry flag */
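
	/* Darwin indicates failure of a UNIX-class syscall by setting the
	   carry flag rather than via the value in %rax, and a second
	   result may come back in %rdx; both are written back to the
	   guest state below. */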

	movq	-16(%rbp), %r11	/* r11 = VexGuestAMD64State * */
	movq	%rax, OFFSET_amd64_RAX(%r11)	/* save back to RAX */
	movq	%rdx, OFFSET_amd64_RDX(%r11)	/* save back to RDX */

.if $0 == UNIX
	/* save carry flag to VEX */
	xor	%rax, %rax
	movb	0(%rsp), %al
	movq	%rax, %rdi	/* arg1 = new flag */
	movq	%r11, %rsi	/* arg2 = vex state */
	addq	$$24, %rsp	/* remove syscall parameters */
	call	_LibVEX_GuestAMD64_put_rflag_c
.else
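	/* For Mach traps and mdep calls the carry flag is not propagated
	   to the guest (they do not use the carry-flag error convention);
	   just drop the syscall parameters. */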
	addq	$$24, %rsp	/* remove syscall parameters */
.endif

L_$0_4:	/* Re-block signals.  If rip is in [4,5), then the syscall
	   is complete and we needn't worry about it. */
	/* GrP fixme signals
           DDD: JRS fixme: use __NR___pthread_sigmask, not __NR_rt_sigprocmask
	PUSH_di_si_dx_cx_8

	movq	$__NR_rt_sigprocmask, %rax	// syscall #
	movq	$VKI_SIG_SETMASK, %rdi		// how
	movq	%rcx, %rsi			// postmask
	xorq	%rdx, %rdx			// NULL
	movq	%r8, %r10			// sigsetSzB
	DDD: fixme return address
	syscall

	POP_di_si_dx_cx_8

	jnc	7f	// sigprocmask failed
	*/
L_$0_5:	/* now safe from signals */
	movq	$$0, %rax	/* SUCCESS */
	movq	%rbp, %rsp
	popq	%rbp
	ret

/* GrP fixme signals
L_$0_7:	// failure:	 return 0x8000 | error code
	DDD: fixme return value
	movq	%rbp, %rsp
	popq	%rbp
	ret
*/

.endmacro

.globl ML_(do_syscall_for_client_unix_WRK)
ML_(do_syscall_for_client_unix_WRK):
	DO_SYSCALL UNIX

.globl ML_(do_syscall_for_client_mach_WRK)
ML_(do_syscall_for_client_mach_WRK):
	DO_SYSCALL MACH

.globl ML_(do_syscall_for_client_mdep_WRK)
ML_(do_syscall_for_client_mdep_WRK):
	DO_SYSCALL MDEP

.data
/* export the ranges so that
   VG_(fixup_guest_state_after_syscall_interrupted) can do the
   right thing */

/* e.g. MK_L_SCCLASS_N(UNIX,99) produces L_3_99
   since UNIX is #defined to 3 at the top of this file */
#define FOO(scclass,labelno) L_##scclass##_##labelno
#define MK_L_SCCLASS_N(scclass,labelno) FOO(scclass,labelno)

.globl ML_(blksys_setup_MACH)
.globl ML_(blksys_restart_MACH)
.globl ML_(blksys_complete_MACH)
.globl ML_(blksys_committed_MACH)
.globl ML_(blksys_finished_MACH)
ML_(blksys_setup_MACH):	.quad MK_L_SCCLASS_N(MACH,1)
ML_(blksys_restart_MACH):	.quad MK_L_SCCLASS_N(MACH,2)
ML_(blksys_complete_MACH):	.quad MK_L_SCCLASS_N(MACH,3)
ML_(blksys_committed_MACH):	.quad MK_L_SCCLASS_N(MACH,4)
ML_(blksys_finished_MACH):	.quad MK_L_SCCLASS_N(MACH,5)

.globl ML_(blksys_setup_MDEP)
.globl ML_(blksys_restart_MDEP)
.globl ML_(blksys_complete_MDEP)
.globl ML_(blksys_committed_MDEP)
.globl ML_(blksys_finished_MDEP)
ML_(blksys_setup_MDEP):	.quad MK_L_SCCLASS_N(MDEP,1)
ML_(blksys_restart_MDEP):	.quad MK_L_SCCLASS_N(MDEP,2)
ML_(blksys_complete_MDEP):	.quad MK_L_SCCLASS_N(MDEP,3)
ML_(blksys_committed_MDEP):	.quad MK_L_SCCLASS_N(MDEP,4)
ML_(blksys_finished_MDEP):	.quad MK_L_SCCLASS_N(MDEP,5)

.globl ML_(blksys_setup_UNIX)
.globl ML_(blksys_restart_UNIX)
.globl ML_(blksys_complete_UNIX)
.globl ML_(blksys_committed_UNIX)
.globl ML_(blksys_finished_UNIX)
ML_(blksys_setup_UNIX):	.quad MK_L_SCCLASS_N(UNIX,1)
ML_(blksys_restart_UNIX):	.quad MK_L_SCCLASS_N(UNIX,2)
ML_(blksys_complete_UNIX):	.quad MK_L_SCCLASS_N(UNIX,3)
ML_(blksys_committed_UNIX):	.quad MK_L_SCCLASS_N(UNIX,4)
ML_(blksys_finished_UNIX):	.quad MK_L_SCCLASS_N(UNIX,5)

#endif // defined(VGP_amd64_darwin)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/