/*--------------------------------------------------------------------*/
/*--- The core dispatch loop, for jumping to a code address.       ---*/
/*---                                         dispatch-x86-linux.S ---*/
/*--------------------------------------------------------------------*/

/*
  This file is part of Valgrind, a dynamic binary instrumentation
  framework.

  Copyright (C) 2000-2010 Julian Seward
     jseward@acm.org

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_x86_linux)

#include "pub_core_basics_asm.h"
#include "pub_core_dispatch_asm.h"
#include "pub_core_transtab_asm.h"
#include "libvex_guest_offsets.h"	/* for OFFSET_x86_EIP */


/*------------------------------------------------------------*/
/*---                                                      ---*/
/*--- The dispatch loop.  VG_(run_innerloop) is used to    ---*/
/*--- run all translations except no-redir ones.           ---*/
/*---                                                      ---*/
/*------------------------------------------------------------*/

/*----------------------------------------------------*/
/*--- Preamble (set everything up)                 ---*/
/*----------------------------------------------------*/

/* signature:
UWord VG_(run_innerloop) ( void* guest_state, UWord do_profiling );
*/
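/* The return value tells the scheduler why the loop stopped: one of
   the VG_TRC_* codes set at the exit points below, or the (modified)
   guest state pointer value if the generated code changed it.

   Caller-side sketch, for illustration only: the real call site is in
   the scheduler, and 'tst' (a pointer to the current thread's state)
   is an assumption here.

      UWord trc = VG_(run_innerloop)( (void*)&tst->arch.vex,
                                      do_profiling ? 1 : 0 );
*/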
.text
.globl VG_(run_innerloop)
.type  VG_(run_innerloop), @function
VG_(run_innerloop):
	/* 4(%esp) holds guest_state */
	/* 8(%esp) holds do_profiling */

	/* ----- entry point to VG_(run_innerloop) ----- */
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	pushl	%esi
	pushl	%edi
	pushl	%ebp

	/* 28(%esp) holds guest_state */
	/* 32(%esp) holds do_profiling */

	/* Set up the guest state pointer */
	movl	28(%esp), %ebp

	/* fetch %EIP into %eax */
	movl	OFFSET_x86_EIP(%ebp), %eax

	/* set host FPU control word to the default mode expected
           by VEX-generated code.  See comments in libvex.h for
           more info. */
	finit
	pushl	$0x027F
	fldcw	(%esp)
	addl	$4, %esp
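	/* (0x027F: all x87 exceptions masked, 53-bit precision,
	   round to nearest.) */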

	/* set host SSE control word to the default mode expected
	   by VEX-generated code. */
	cmpl	$0, VG_(machine_x86_have_mxcsr)
	jz	L1
	pushl	$0x1F80
	ldmxcsr	(%esp)
	addl	$4, %esp
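	/* (0x1F80: all SSE exceptions masked, round to nearest,
	   FZ and DAZ clear.) */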
L1:
	/* set dir flag to known value */
	cld

	/* fall into main loop (the right one) */
	cmpl	$0, 32(%esp) /* do_profiling */
	je	VG_(run_innerloop__dispatch_unprofiled)
	jmp	VG_(run_innerloop__dispatch_profiled)
	/*NOTREACHED*/

/*----------------------------------------------------*/
/*--- NO-PROFILING (standard) dispatcher           ---*/
/*----------------------------------------------------*/

.align	16
.global	VG_(run_innerloop__dispatch_unprofiled)
VG_(run_innerloop__dispatch_unprofiled):
	/* AT ENTRY: %eax is next guest addr, %ebp is possibly
           modified guest state ptr */

	/* Has the guest state pointer been messed with?  If yes, exit. */
	testl	$1, %ebp
	jnz	gsp_changed

	/* save the jump address in the guest state */
	movl	%eax, OFFSET_x86_EIP(%ebp)

	/* Are we out of timeslice?  If yes, defer to scheduler. */
	subl	$1, VG_(dispatch_ctr)
	jz	counter_is_zero

	/* try a fast lookup in the translation cache */
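	/* VG_(tt_fast) is an array of { guest addr, host addr } pairs,
	   8 bytes per entry on x86, indexed by the guest address masked
	   with VG_TT_FAST_MASK; hence the scale-by-8 addressing. */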
	movl	%eax, %ebx			/* next guest addr */
	andl	$ VG_TT_FAST_MASK, %ebx		/* entry# */
	movl	0+VG_(tt_fast)(,%ebx,8), %esi	/* .guest */
	movl	4+VG_(tt_fast)(,%ebx,8), %edi	/* .host */
	cmpl	%eax, %esi
	jnz	fast_lookup_failed

	/* Found a match.  Jump to .host. */
	jmp 	*%edi
	ud2	/* persuade insn decoders not to speculate past here */
	/* generated code should run, then jump back to
	   VG_(run_innerloop__dispatch_unprofiled). */
	/*NOTREACHED*/

/*----------------------------------------------------*/
/*--- PROFILING dispatcher (can be much slower)    ---*/
/*----------------------------------------------------*/

.align	16
.global	VG_(run_innerloop__dispatch_profiled)
VG_(run_innerloop__dispatch_profiled):
	/* AT ENTRY: %eax is next guest addr, %ebp is possibly
           modified guest state ptr */

	/* Has the guest state pointer been messed with?  If yes, exit. */
	testl	$1, %ebp
	jnz	gsp_changed

	/* save the jump address in the guest state */
	movl	%eax, OFFSET_x86_EIP(%ebp)

	/* Are we out of timeslice?  If yes, defer to scheduler. */
	subl	$1, VG_(dispatch_ctr)
	jz	counter_is_zero

	/* try a fast lookup in the translation cache */
	movl	%eax, %ebx			/* next guest addr */
	andl	$ VG_TT_FAST_MASK, %ebx		/* entry# */
	movl	0+VG_(tt_fast)(,%ebx,8), %esi	/* .guest */
	movl	4+VG_(tt_fast)(,%ebx,8), %edi	/* .host */
	cmpl	%eax, %esi
	jnz	fast_lookup_failed

	/* increment bb profile counter */
	/* note: innocuous as this sounds, it puts considerably more
	   stress on the D1 cache and significantly slows everything down. */
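	/* VG_(tt_fastN)[entry#] is a pointer to this translation's
	   execution count; bump the count it points at. */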
	movl	VG_(tt_fastN)(,%ebx,4), %edx
	/* Use "addl $1", not "incl", to avoid partial-flags stall on P4 */
	addl	$1, (%edx)

	/* Found a match.  Jump to .host. */
	jmp 	*%edi
	ud2	/* persuade insn decoders not to speculate past here */
	/* generated code should run, then jump back to
	   VG_(run_innerloop__dispatch_profiled). */
	/*NOTREACHED*/

/*----------------------------------------------------*/
/*--- exit points                                  ---*/
/*----------------------------------------------------*/

gsp_changed:
	/* Someone messed with the gsp.  Have to
           defer to scheduler to resolve this.  dispatch ctr
	   is not yet decremented, so no need to increment. */
	/* %EIP is NOT up to date here.  First, need to write
	   %eax back to %EIP, but without trashing %ebp since
	   that holds the value we want to return to the scheduler.
	   Hence use %esi transiently for the guest state pointer. */
	movl	28(%esp), %esi
	movl	%eax, OFFSET_x86_EIP(%esi)
	movl	%ebp, %eax
	jmp	run_innerloop_exit
	/*NOTREACHED*/

counter_is_zero:
	/* %EIP is up to date here */
	/* back out decrement of the dispatch counter */
	addl	$1, VG_(dispatch_ctr)
	movl	$ VG_TRC_INNER_COUNTERZERO, %eax
	jmp	run_innerloop_exit
	/*NOTREACHED*/

fast_lookup_failed:
	/* %EIP is up to date here */
	/* back out decrement of the dispatch counter */
	addl	$1, VG_(dispatch_ctr)
	movl	$ VG_TRC_INNER_FASTMISS, %eax
	jmp	run_innerloop_exit
	/*NOTREACHED*/



/* All exits from the dispatcher go through here.  %eax holds
   the return value.
*/
run_innerloop_exit:
	/* We're leaving.  Check that nobody messed with
	   %mxcsr or %fpucw.  We can't clobber %eax here, as it
	   holds the tentative return value, but any other register
	   is fair game. */
#if !defined(ENABLE_INNER)
        /* This check fails for self-hosting, so skip in that case */
	pushl	$0
	fstcw	(%esp)
	cmpl	$0x027F, (%esp)
	popl	%esi /* get rid of the word without trashing %eflags */
	jnz	invariant_violation
#endif
	cmpl	$0, VG_(machine_x86_have_mxcsr)
	jz	L2
	pushl	$0
	stmxcsr	(%esp)
	andl	$0xFFFFFFC0, (%esp)  /* mask out status flags */
	cmpl	$0x1F80, (%esp)
	popl	%esi
	jnz	invariant_violation
L2:	/* otherwise we're OK */
	jmp	run_innerloop_exit_REALLY

invariant_violation:
	movl	$ VG_TRC_INVARIANT_FAILED, %eax
	jmp	run_innerloop_exit_REALLY

run_innerloop_exit_REALLY:
	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%edx
	popl	%ecx
	popl	%ebx
	ret
.size VG_(run_innerloop), .-VG_(run_innerloop)


/*------------------------------------------------------------*/
/*---                                                      ---*/
/*--- A special dispatcher, for running no-redir           ---*/
/*--- translations.  Just runs the given translation once. ---*/
/*---                                                      ---*/
/*------------------------------------------------------------*/

/* signature:
void VG_(run_a_noredir_translation) ( UWord* argblock );
*/

/* Run a no-redir translation.  argblock points to 4 UWords, 2 to carry args
   and 2 to carry results:
      0: input:  ptr to translation
      1: input:  ptr to guest state
      2: output: next guest PC
      3: output: guest state pointer afterwards (== thread return code)
*/
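/* Caller-side sketch, for illustration only (the real call site is in
   the scheduler; the variable names used here are assumptions):

      volatile UWord argblock[4];
      argblock[0] = (UWord)host_code;       // ptr to translation
      argblock[1] = (UWord)&tst->arch.vex;  // ptr to guest state
      argblock[2] = 0;
      argblock[3] = 0;
      VG_(run_a_noredir_translation)( &argblock[0] );
      // argblock[2] now holds the next guest PC, and argblock[3]
      // the guest state pointer afterwards.
*/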
.align 16
.global VG_(run_a_noredir_translation)
.type VG_(run_a_noredir_translation), @function
VG_(run_a_noredir_translation):
	/* Save callee-saves regs */
	pushl %esi
	pushl %edi
	pushl %ebp
	pushl %ebx

	movl 20(%esp), %edi	/* %edi = argblock */
	movl 4(%edi), %ebp	/* argblock[1] */
	jmp *0(%edi)		/* argblock[0] */
	/*NOTREACHED*/
	ud2
	/* If the translation has been correctly constructed, we
	   should resume at the following label. */
.global VG_(run_a_noredir_translation__return_point)
VG_(run_a_noredir_translation__return_point):
	movl 20(%esp), %edi
	movl %eax, 8(%edi)	/* argblock[2] */
	movl %ebp, 12(%edi)	/* argblock[3] */

	popl %ebx
	popl %ebp
	popl %edi
	popl %esi
	ret
.size VG_(run_a_noredir_translation), .-VG_(run_a_noredir_translation)


/* Let the linker know we don't need an executable stack */
.section .note.GNU-stack,"",@progbits

#endif // defined(VGP_x86_linux)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/