valgrind.h revision 4118e0f0df0468b3eccf2515d13c3d9c478d6f16
1d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant/* -*- c -*- 2d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant ---------------------------------------------------------------- 3d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 4d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant Notice that the following BSD-style license applies to this one 5d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant file (valgrind.h) only. The rest of Valgrind is licensed under the 6d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant terms of the GNU General Public License, version 2, unless 7d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant otherwise indicated. See the COPYING file in the source 8d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant distribution for details. 9d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 10d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant ---------------------------------------------------------------- 11d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 12d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant This file is part of Valgrind, a dynamic binary instrumentation 13d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant framework. 14d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 15d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant Copyright (C) 2000-2011 Julian Seward. All rights reserved. 16d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 17d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant Redistribution and use in source and binary forms, with or without 18d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant modification, are permitted provided that the following conditions 19d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant are met: 20d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 21f72cdd5b49372fa50dbc79d712615b4b35b00796Nick Kledzik 1. 
Redistributions of source code must retain the above copyright 224c2acbcfc928a7d4f18b2c9202288abdf0131219Marshall Clow notice, this list of conditions and the following disclaimer. 2315a6928f197d9bdce3284fb1588820989abdc1e5Nick Kledzik 2415a6928f197d9bdce3284fb1588820989abdc1e5Nick Kledzik 2. The origin of this software must not be misrepresented; you must 2515a6928f197d9bdce3284fb1588820989abdc1e5Nick Kledzik not claim that you wrote the original software. If you use this 26d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant software in a product, an acknowledgment in the product 2725f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant documentation would be appreciated but is not required. 28d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 29d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 3. Altered source versions must be plainly marked as such, and must 30d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant not be misrepresented as being the original software. 31d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 32d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 4. The name of the author may not be used to endorse or promote 33d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant products derived from this software without specific prior written 34d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant permission. 35d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 36ca00a4e83ed1adcdc6f34ae25c5e705315168a26Howard Hinnant THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS 37ca00a4e83ed1adcdc6f34ae25c5e705315168a26Howard Hinnant OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 38d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 39d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 40703d148d5994d7a1cd189dc57acc7704f48fbd13Marshall Clow DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 41703d148d5994d7a1cd189dc57acc7704f48fbd13Marshall Clow DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE 42d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 43d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 44ca00a4e83ed1adcdc6f34ae25c5e705315168a26Howard Hinnant WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 45ca00a4e83ed1adcdc6f34ae25c5e705315168a26Howard Hinnant NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 46d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 47d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 48d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant ---------------------------------------------------------------- 49d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 50703d148d5994d7a1cd189dc57acc7704f48fbd13Marshall Clow Notice that the above BSD-style license applies to this one file 51d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant (valgrind.h) only. The entire rest of Valgrind is licensed under 52d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant the terms of the GNU General Public License, version 2. See the 53d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant COPYING file in the source distribution for details. 
54d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 554c2acbcfc928a7d4f18b2c9202288abdf0131219Marshall Clow ---------------------------------------------------------------- 564c2acbcfc928a7d4f18b2c9202288abdf0131219Marshall Clow*/ 57d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 58d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 59d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant/* This file is for inclusion into client (your!) code. 60d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 614c2acbcfc928a7d4f18b2c9202288abdf0131219Marshall Clow You can use these macros to manipulate and query Valgrind's 62d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant execution inside your own programs. 6392827189cbb1e63dfebf0a46aac4372c3089ff11Howard Hinnant 644c2acbcfc928a7d4f18b2c9202288abdf0131219Marshall Clow The resulting executables will still run without Valgrind, just a 6592827189cbb1e63dfebf0a46aac4372c3089ff11Howard Hinnant little bit more slowly than they otherwise would, but otherwise 66d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant unchanged. When not running on valgrind, each client request 67d8cfd659b70bcf65a9c810ebe525632af38726f0Nick Lewycky consumes very few (eg. 7) instructions, so the resulting performance 68d8cfd659b70bcf65a9c810ebe525632af38726f0Nick Lewycky loss is negligible unless you plan to execute client requests 69d8cfd659b70bcf65a9c810ebe525632af38726f0Nick Lewycky millions of times per second. Nevertheless, if that is still a 70d8cfd659b70bcf65a9c810ebe525632af38726f0Nick Lewycky problem, you can compile with the NVALGRIND symbol defined (gcc 71d8cfd659b70bcf65a9c810ebe525632af38726f0Nick Lewycky -DNVALGRIND) so that client requests are not even compiled in. 
*/ 72d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 73d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#ifndef __VALGRIND_H 74d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#define __VALGRIND_H 75d8cfd659b70bcf65a9c810ebe525632af38726f0Nick Lewycky 76d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 77d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant/* ------------------------------------------------------------------ */ 78d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant/* VERSION NUMBER OF VALGRIND */ 7925f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant/* ------------------------------------------------------------------ */ 80d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 8125f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant/* Specify Valgrind's version number, so that user code can 8225f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant conditionally compile based on our version number. Note that these 83d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant were introduced at version 3.6 and so do not exist in version 3.5 84d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant or earlier. 
The recommended way to use them to check for "version 8525f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant X.Y or later" is (eg) 86d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 8725f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \ 8825f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant && (__VALGRIND_MAJOR__ > 3 \ 89d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) 90d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant*/ 91d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#define __VALGRIND_MAJOR__ 3 92d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#define __VALGRIND_MINOR__ 8 9325f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant 94d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 9525f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant#include <stdarg.h> 9625f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant 97d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant/* Nb: this file might be included in a file compiled with -ansi. So 98d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant we can't use C++ style "//" comments nor the "asm" keyword (instead 99d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant use "__asm__"). */ 100d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 101d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant/* Derive some tags indicating what the target platform is. Note 102d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant that in this file we're using the compiler's CPP symbols for 10325f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant identifying architectures, which are different to the ones we use 10425f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant within the rest of Valgrind. 
Note, __powerpc__ is active for both 105d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 32 and 64-bit PPC, whereas __powerpc64__ is only active for the 106d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant latter (on Linux, that is). 107d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 108d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant Misc note: how to find out what's predefined in gcc by default: 10925f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant gcc -Wp,-dM somefile.c 11025f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant*/ 111d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#undef PLAT_x86_darwin 112d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#undef PLAT_amd64_darwin 113d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#undef PLAT_x86_win32 114d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#undef PLAT_x86_linux 115d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#undef PLAT_amd64_linux 11625f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant#undef PLAT_ppc32_linux 117d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#undef PLAT_ppc64_linux 118d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#undef PLAT_arm_linux 119d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#undef PLAT_s390x_linux 120d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#undef PLAT_mips32_linux 121d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 12225f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant 123d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#if defined(__APPLE__) && defined(__i386__) 124d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# define PLAT_x86_darwin 1 125d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#elif defined(__APPLE__) && defined(__x86_64__) 126d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# define PLAT_amd64_darwin 1 127d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#elif defined(__MINGW32__) || defined(__CYGWIN32__) \ 
12825f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant || (defined(_WIN32) && defined(_M_IX86)) 129d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# define PLAT_x86_win32 1 130d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#elif defined(__linux__) && defined(__i386__) 131d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# define PLAT_x86_linux 1 132d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#elif defined(__linux__) && defined(__x86_64__) 133d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# define PLAT_amd64_linux 1 134d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__) 13525f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant# define PLAT_ppc32_linux 1 13625f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) 137d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# define PLAT_ppc64_linux 1 138d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#elif defined(__linux__) && defined(__arm__) 139d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# define PLAT_arm_linux 1 14025f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant#elif defined(__linux__) && defined(__s390__) && defined(__s390x__) 14125f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant# define PLAT_s390x_linux 1 14225f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant#elif defined(__linux__) && defined(__mips__) 14325f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant# define PLAT_mips32_linux 1 14425f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant#else 145d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant/* If we're not compiling for our target platform, don't generate 146d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant any inline asms. 
*/ 147d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# if !defined(NVALGRIND) 148d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# define NVALGRIND 1 149d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant# endif 150d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#endif 151d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 152d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 153d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant/* ------------------------------------------------------------------ */ 15425f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */ 15525f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant/* in here of use to end-users -- skip to the next section. */ 15625f180797ba296e6cd3aa45e1fef9321b828301aHoward Hinnant/* ------------------------------------------------------------------ */ 157d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 158d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant/* 159d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client 160ca00a4e83ed1adcdc6f34ae25c5e705315168a26Howard Hinnant * request. Accepts both pointers and integers as arguments. 161d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant * 162ca00a4e83ed1adcdc6f34ae25c5e705315168a26Howard Hinnant * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind 163ca00a4e83ed1adcdc6f34ae25c5e705315168a26Howard Hinnant * client request that does not return a value. 164d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant 165d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind 166ca00a4e83ed1adcdc6f34ae25c5e705315168a26Howard Hinnant * client request and whose value equals the client request result. Accepts 167d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant * both pointers and integers as arguments. 
Note that such calls are not 168d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant * necessarily pure functions -- they may have side effects. 169d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant */ 1701b0aed9312dca7ffd27522864b9101ca816112b1Howard Hinnant 171d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \ 17215a6928f197d9bdce3284fb1588820989abdc1e5Nick Kledzik _zzq_request, _zzq_arg1, _zzq_arg2, \ 173d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 174d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \ 175d213ffdf3befead3b8f5a0ba12ce1c2d9949525Howard Hinnant (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ 176 (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) 177 178#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \ 179 _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 180 do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ 181 (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ 182 (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) 183 184#if defined(NVALGRIND) 185 186/* Define NVALGRIND to completely remove the Valgrind magic sequence 187 from the compiled code (analogous to NDEBUG's effects on 188 assert()) */ 189#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 190 _zzq_default, _zzq_request, \ 191 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 192 (_zzq_default) 193 194#else /* ! NVALGRIND */ 195 196/* The following defines the magic code sequences which the JITter 197 spots and handles magically. Don't look too closely at them as 198 they will rot your brain. 199 200 The assembly code sequences for all architectures is in this one 201 file. This is because this file must be stand-alone, and we don't 202 want to have multiple files. 
203 204 For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default 205 value gets put in the return slot, so that everything works when 206 this is executed not under Valgrind. Args are passed in a memory 207 block, and so there's no intrinsic limit to the number that could 208 be passed, but it's currently five. 209 210 The macro args are: 211 _zzq_rlval result lvalue 212 _zzq_default default value (result returned when running on real CPU) 213 _zzq_request request code 214 _zzq_arg1..5 request params 215 216 The other two macros are used to support function wrapping, and are 217 a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the 218 guest's NRADDR pseudo-register and whatever other information is 219 needed to safely run the call original from the wrapper: on 220 ppc64-linux, the R2 value at the divert point is also needed. This 221 information is abstracted into a user-visible type, OrigFn. 222 223 VALGRIND_CALL_NOREDIR_* behaves the same as the following on the 224 guest, but guarantees that the branch instruction will not be 225 redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64: 226 branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a 227 complete inline asm, since it needs to be combined with more magic 228 inline asm stuff to be useful. 229*/ 230 231/* ------------------------- x86-{linux,darwin} ---------------- */ 232 233#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ 234 || (defined(PLAT_x86_win32) && defined(__GNUC__)) 235 236typedef 237 struct { 238 unsigned int nraddr; /* where's the code? 
*/ 239 } 240 OrigFn; 241 242#define __SPECIAL_INSTRUCTION_PREAMBLE \ 243 "roll $3, %%edi ; roll $13, %%edi\n\t" \ 244 "roll $29, %%edi ; roll $19, %%edi\n\t" 245 246#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 247 _zzq_default, _zzq_request, \ 248 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 249 __extension__ \ 250 ({volatile unsigned int _zzq_args[6]; \ 251 volatile unsigned int _zzq_result; \ 252 _zzq_args[0] = (unsigned int)(_zzq_request); \ 253 _zzq_args[1] = (unsigned int)(_zzq_arg1); \ 254 _zzq_args[2] = (unsigned int)(_zzq_arg2); \ 255 _zzq_args[3] = (unsigned int)(_zzq_arg3); \ 256 _zzq_args[4] = (unsigned int)(_zzq_arg4); \ 257 _zzq_args[5] = (unsigned int)(_zzq_arg5); \ 258 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ 259 /* %EDX = client_request ( %EAX ) */ \ 260 "xchgl %%ebx,%%ebx" \ 261 : "=d" (_zzq_result) \ 262 : "a" (&_zzq_args[0]), "0" (_zzq_default) \ 263 : "cc", "memory" \ 264 ); \ 265 _zzq_result; \ 266 }) 267 268#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ 269 { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ 270 volatile unsigned int __addr; \ 271 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ 272 /* %EAX = guest_NRADDR */ \ 273 "xchgl %%ecx,%%ecx" \ 274 : "=a" (__addr) \ 275 : \ 276 : "cc", "memory" \ 277 ); \ 278 _zzq_orig->nraddr = __addr; \ 279 } 280 281#define VALGRIND_CALL_NOREDIR_EAX \ 282 __SPECIAL_INSTRUCTION_PREAMBLE \ 283 /* call-noredir *%EAX */ \ 284 "xchgl %%edx,%%edx\n\t" 285#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */ 286 287/* ------------------------- x86-Win32 ------------------------- */ 288 289#if defined(PLAT_x86_win32) && !defined(__GNUC__) 290 291typedef 292 struct { 293 unsigned int nraddr; /* where's the code? 
*/ 294 } 295 OrigFn; 296 297#if defined(_MSC_VER) 298 299#define __SPECIAL_INSTRUCTION_PREAMBLE \ 300 __asm rol edi, 3 __asm rol edi, 13 \ 301 __asm rol edi, 29 __asm rol edi, 19 302 303#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 304 _zzq_default, _zzq_request, \ 305 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 306 valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \ 307 (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \ 308 (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \ 309 (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5)) 310 311static __inline uintptr_t 312valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request, 313 uintptr_t _zzq_arg1, uintptr_t _zzq_arg2, 314 uintptr_t _zzq_arg3, uintptr_t _zzq_arg4, 315 uintptr_t _zzq_arg5) 316{ 317 volatile uintptr_t _zzq_args[6]; 318 volatile unsigned int _zzq_result; 319 _zzq_args[0] = (uintptr_t)(_zzq_request); 320 _zzq_args[1] = (uintptr_t)(_zzq_arg1); 321 _zzq_args[2] = (uintptr_t)(_zzq_arg2); 322 _zzq_args[3] = (uintptr_t)(_zzq_arg3); 323 _zzq_args[4] = (uintptr_t)(_zzq_arg4); 324 _zzq_args[5] = (uintptr_t)(_zzq_arg5); 325 __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default 326 __SPECIAL_INSTRUCTION_PREAMBLE 327 /* %EDX = client_request ( %EAX ) */ 328 __asm xchg ebx,ebx 329 __asm mov _zzq_result, edx 330 } 331 return _zzq_result; 332} 333 334#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ 335 { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ 336 volatile unsigned int __addr; \ 337 __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ 338 /* %EAX = guest_NRADDR */ \ 339 __asm xchg ecx,ecx \ 340 __asm mov __addr, eax \ 341 } \ 342 _zzq_orig->nraddr = __addr; \ 343 } 344 345#define VALGRIND_CALL_NOREDIR_EAX ERROR 346 347#else 348#error Unsupported compiler. 
349#endif 350 351#endif /* PLAT_x86_win32 */ 352 353/* ------------------------ amd64-{linux,darwin} --------------- */ 354 355#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) 356 357typedef 358 struct { 359 unsigned long long int nraddr; /* where's the code? */ 360 } 361 OrigFn; 362 363#define __SPECIAL_INSTRUCTION_PREAMBLE \ 364 "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ 365 "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" 366 367#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 368 _zzq_default, _zzq_request, \ 369 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 370 __extension__ \ 371 ({ volatile unsigned long long int _zzq_args[6]; \ 372 volatile unsigned long long int _zzq_result; \ 373 _zzq_args[0] = (unsigned long long int)(_zzq_request); \ 374 _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \ 375 _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \ 376 _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \ 377 _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \ 378 _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \ 379 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ 380 /* %RDX = client_request ( %RAX ) */ \ 381 "xchgq %%rbx,%%rbx" \ 382 : "=d" (_zzq_result) \ 383 : "a" (&_zzq_args[0]), "0" (_zzq_default) \ 384 : "cc", "memory" \ 385 ); \ 386 _zzq_result; \ 387 }) 388 389#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ 390 { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ 391 volatile unsigned long long int __addr; \ 392 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ 393 /* %RAX = guest_NRADDR */ \ 394 "xchgq %%rcx,%%rcx" \ 395 : "=a" (__addr) \ 396 : \ 397 : "cc", "memory" \ 398 ); \ 399 _zzq_orig->nraddr = __addr; \ 400 } 401 402#define VALGRIND_CALL_NOREDIR_RAX \ 403 __SPECIAL_INSTRUCTION_PREAMBLE \ 404 /* call-noredir *%RAX */ \ 405 "xchgq %%rdx,%%rdx\n\t" 406#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */ 407 408/* ------------------------ ppc32-linux ------------------------ */ 409 410#if defined(PLAT_ppc32_linux) 411 412typedef 413 
struct { 414 unsigned int nraddr; /* where's the code? */ 415 } 416 OrigFn; 417 418#define __SPECIAL_INSTRUCTION_PREAMBLE \ 419 "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \ 420 "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t" 421 422#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 423 _zzq_default, _zzq_request, \ 424 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 425 \ 426 __extension__ \ 427 ({ unsigned int _zzq_args[6]; \ 428 unsigned int _zzq_result; \ 429 unsigned int* _zzq_ptr; \ 430 _zzq_args[0] = (unsigned int)(_zzq_request); \ 431 _zzq_args[1] = (unsigned int)(_zzq_arg1); \ 432 _zzq_args[2] = (unsigned int)(_zzq_arg2); \ 433 _zzq_args[3] = (unsigned int)(_zzq_arg3); \ 434 _zzq_args[4] = (unsigned int)(_zzq_arg4); \ 435 _zzq_args[5] = (unsigned int)(_zzq_arg5); \ 436 _zzq_ptr = _zzq_args; \ 437 __asm__ volatile("mr 3,%1\n\t" /*default*/ \ 438 "mr 4,%2\n\t" /*ptr*/ \ 439 __SPECIAL_INSTRUCTION_PREAMBLE \ 440 /* %R3 = client_request ( %R4 ) */ \ 441 "or 1,1,1\n\t" \ 442 "mr %0,3" /*result*/ \ 443 : "=b" (_zzq_result) \ 444 : "b" (_zzq_default), "b" (_zzq_ptr) \ 445 : "cc", "memory", "r3", "r4"); \ 446 _zzq_result; \ 447 }) 448 449#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ 450 { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ 451 unsigned int __addr; \ 452 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ 453 /* %R3 = guest_NRADDR */ \ 454 "or 2,2,2\n\t" \ 455 "mr %0,3" \ 456 : "=b" (__addr) \ 457 : \ 458 : "cc", "memory", "r3" \ 459 ); \ 460 _zzq_orig->nraddr = __addr; \ 461 } 462 463#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ 464 __SPECIAL_INSTRUCTION_PREAMBLE \ 465 /* branch-and-link-to-noredir *%R11 */ \ 466 "or 3,3,3\n\t" 467#endif /* PLAT_ppc32_linux */ 468 469/* ------------------------ ppc64-linux ------------------------ */ 470 471#if defined(PLAT_ppc64_linux) 472 473typedef 474 struct { 475 unsigned long long int nraddr; /* where's the code? */ 476 unsigned long long int r2; /* what tocptr do we need? 
*/ 477 } 478 OrigFn; 479 480#define __SPECIAL_INSTRUCTION_PREAMBLE \ 481 "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ 482 "rotldi 0,0,61 ; rotldi 0,0,51\n\t" 483 484#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 485 _zzq_default, _zzq_request, \ 486 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 487 \ 488 __extension__ \ 489 ({ unsigned long long int _zzq_args[6]; \ 490 unsigned long long int _zzq_result; \ 491 unsigned long long int* _zzq_ptr; \ 492 _zzq_args[0] = (unsigned long long int)(_zzq_request); \ 493 _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \ 494 _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \ 495 _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \ 496 _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \ 497 _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \ 498 _zzq_ptr = _zzq_args; \ 499 __asm__ volatile("mr 3,%1\n\t" /*default*/ \ 500 "mr 4,%2\n\t" /*ptr*/ \ 501 __SPECIAL_INSTRUCTION_PREAMBLE \ 502 /* %R3 = client_request ( %R4 ) */ \ 503 "or 1,1,1\n\t" \ 504 "mr %0,3" /*result*/ \ 505 : "=b" (_zzq_result) \ 506 : "b" (_zzq_default), "b" (_zzq_ptr) \ 507 : "cc", "memory", "r3", "r4"); \ 508 _zzq_result; \ 509 }) 510 511#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ 512 { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ 513 unsigned long long int __addr; \ 514 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ 515 /* %R3 = guest_NRADDR */ \ 516 "or 2,2,2\n\t" \ 517 "mr %0,3" \ 518 : "=b" (__addr) \ 519 : \ 520 : "cc", "memory", "r3" \ 521 ); \ 522 _zzq_orig->nraddr = __addr; \ 523 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ 524 /* %R3 = guest_NRADDR_GPR2 */ \ 525 "or 4,4,4\n\t" \ 526 "mr %0,3" \ 527 : "=b" (__addr) \ 528 : \ 529 : "cc", "memory", "r3" \ 530 ); \ 531 _zzq_orig->r2 = __addr; \ 532 } 533 534#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ 535 __SPECIAL_INSTRUCTION_PREAMBLE \ 536 /* branch-and-link-to-noredir *%R11 */ \ 537 "or 3,3,3\n\t" 538 539#endif /* PLAT_ppc64_linux */ 540 541/* ------------------------- 
arm-linux ------------------------- */ 542 543#if defined(PLAT_arm_linux) 544 545typedef 546 struct { 547 unsigned int nraddr; /* where's the code? */ 548 } 549 OrigFn; 550 551#define __SPECIAL_INSTRUCTION_PREAMBLE \ 552 "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \ 553 "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t" 554 555#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 556 _zzq_default, _zzq_request, \ 557 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 558 \ 559 __extension__ \ 560 ({volatile unsigned int _zzq_args[6]; \ 561 volatile unsigned int _zzq_result; \ 562 _zzq_args[0] = (unsigned int)(_zzq_request); \ 563 _zzq_args[1] = (unsigned int)(_zzq_arg1); \ 564 _zzq_args[2] = (unsigned int)(_zzq_arg2); \ 565 _zzq_args[3] = (unsigned int)(_zzq_arg3); \ 566 _zzq_args[4] = (unsigned int)(_zzq_arg4); \ 567 _zzq_args[5] = (unsigned int)(_zzq_arg5); \ 568 __asm__ volatile("mov r3, %1\n\t" /*default*/ \ 569 "mov r4, %2\n\t" /*ptr*/ \ 570 __SPECIAL_INSTRUCTION_PREAMBLE \ 571 /* R3 = client_request ( R4 ) */ \ 572 "orr r10, r10, r10\n\t" \ 573 "mov %0, r3" /*result*/ \ 574 : "=r" (_zzq_result) \ 575 : "r" (_zzq_default), "r" (&_zzq_args[0]) \ 576 : "cc","memory", "r3", "r4"); \ 577 _zzq_result; \ 578 }) 579 580#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ 581 { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ 582 unsigned int __addr; \ 583 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ 584 /* R3 = guest_NRADDR */ \ 585 "orr r11, r11, r11\n\t" \ 586 "mov %0, r3" \ 587 : "=r" (__addr) \ 588 : \ 589 : "cc", "memory", "r3" \ 590 ); \ 591 _zzq_orig->nraddr = __addr; \ 592 } 593 594#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 595 __SPECIAL_INSTRUCTION_PREAMBLE \ 596 /* branch-and-link-to-noredir *%R4 */ \ 597 "orr r12, r12, r12\n\t" 598 599#endif /* PLAT_arm_linux */ 600 601/* ------------------------ s390x-linux ------------------------ */ 602 603#if defined(PLAT_s390x_linux) 604 605typedef 606 struct { 607 unsigned long long int nraddr; /* where's the 
code? */ 608 } 609 OrigFn; 610 611/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific 612 * code. This detection is implemented in platform specific toIR.c 613 * (e.g. VEX/priv/guest_s390_decoder.c). 614 */ 615#define __SPECIAL_INSTRUCTION_PREAMBLE \ 616 "lr 15,15\n\t" \ 617 "lr 1,1\n\t" \ 618 "lr 2,2\n\t" \ 619 "lr 3,3\n\t" 620 621#define __CLIENT_REQUEST_CODE "lr 2,2\n\t" 622#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t" 623#define __CALL_NO_REDIR_CODE "lr 4,4\n\t" 624 625#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 626 _zzq_default, _zzq_request, \ 627 _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ 628 __extension__ \ 629 ({volatile unsigned long long int _zzq_args[6]; \ 630 volatile unsigned long long int _zzq_result; \ 631 _zzq_args[0] = (unsigned long long int)(_zzq_request); \ 632 _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \ 633 _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \ 634 _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \ 635 _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \ 636 _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \ 637 __asm__ volatile(/* r2 = args */ \ 638 "lgr 2,%1\n\t" \ 639 /* r3 = default */ \ 640 "lgr 3,%2\n\t" \ 641 __SPECIAL_INSTRUCTION_PREAMBLE \ 642 __CLIENT_REQUEST_CODE \ 643 /* results = r3 */ \ 644 "lgr %0, 3\n\t" \ 645 : "=d" (_zzq_result) \ 646 : "a" (&_zzq_args[0]), "0" (_zzq_default) \ 647 : "cc", "2", "3", "memory" \ 648 ); \ 649 _zzq_result; \ 650 }) 651 652#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ 653 { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ 654 volatile unsigned long long int __addr; \ 655 __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ 656 __GET_NR_CONTEXT_CODE \ 657 "lgr %0, 3\n\t" \ 658 : "=a" (__addr) \ 659 : \ 660 : "cc", "3", "memory" \ 661 ); \ 662 _zzq_orig->nraddr = __addr; \ 663 } 664 665#define VALGRIND_CALL_NOREDIR_R1 \ 666 __SPECIAL_INSTRUCTION_PREAMBLE \ 667 __CALL_NO_REDIR_CODE 668 669#endif /* PLAT_s390x_linux */ 670 671/* 
 ------------------------- mips32-linux ---------------- */

#if defined(PLAT_mips32_linux)

typedef
   struct { 
      unsigned int nraddr; /* where's the code? */
   }
   OrigFn;

/* Encodings of the four marker instructions below:
 * .word 0x342
 * .word 0x742
 * .word 0xC2
 * .word 0x4C2*/
/* Shifts of $0 ($zero) are architectural no-ops, so this magic
   marker sequence is harmless when run natively. */
#define __SPECIAL_INSTRUCTION_PREAMBLE          \
                     "srl $0, $0, 13\n\t"       \
                     "srl $0, $0, 29\n\t"       \
                     "srl $0, $0, 3\n\t"        \
                     "srl $0, $0, 19\n\t"

/* Pack the request code and five arguments into a 6-word array,
   point $12 at it, preload $11 with the default, then emit the
   preamble followed by "or $13,$13,$13", which the tool decodes as
   T3 = client_request(T4).  Run natively, $11 keeps the default.
   (In the o32 naming, $11/$12 are t3/t4 -- hence the clobbers.) */
#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                        \
       _zzq_default, _zzq_request,                              \
       _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)   \
  __extension__                                                 \
  ({ volatile unsigned int _zzq_args[6];                        \
    volatile unsigned int _zzq_result;                          \
    _zzq_args[0] = (unsigned int)(_zzq_request);                \
    _zzq_args[1] = (unsigned int)(_zzq_arg1);                   \
    _zzq_args[2] = (unsigned int)(_zzq_arg2);                   \
    _zzq_args[3] = (unsigned int)(_zzq_arg3);                   \
    _zzq_args[4] = (unsigned int)(_zzq_arg4);                   \
    _zzq_args[5] = (unsigned int)(_zzq_arg5);                   \
        __asm__ volatile("move $11, %1\n\t" /*default*/         \
                     "move $12, %2\n\t" /*ptr*/                 \
                     __SPECIAL_INSTRUCTION_PREAMBLE             \
                     /* T3 = client_request ( T4 ) */           \
                     "or $13, $13, $13\n\t"                     \
                     "move %0, $11\n\t" /*result*/              \
                     : "=r" (_zzq_result)                       \
                     : "r" (_zzq_default), "r" (&_zzq_args[0])  \
                     : "cc","memory", "t3", "t4");              \
    _zzq_result;                                                \
  })

/* Collect guest_NRADDR (address of the original, non-redirected
   function) into _zzq_rlval.nraddr.
   NOTE(review): the asm comment says "%t9 = guest_NRADDR" but the
   result is read from $11 (t3) -- presumably the tool delivers it
   there; confirm against the mips32 toIR.c decoder. */
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                     \
  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                 \
    volatile unsigned int __addr;                               \
        __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE         \
                     /* %t9 = guest_NRADDR */                   \
                     "or $14, $14, $14\n\t"                     \
                     "move %0, $11"     /*result*/              \
                     : "=r" (__addr)                            \
                     :                                          \
                     : "cc", "memory" , "t3"                    \
                     );                                         \
    _zzq_orig->nraddr = __addr;                                 \
  }

/* Call the function whose address is in $t9, bypassing redirection. */
#define VALGRIND_CALL_NOREDIR_T9                                \
                     __SPECIAL_INSTRUCTION_PREAMBLE             \
                     /* call-noredir *%t9 */                    \
                     "or $15, $15, $15\n\t"
#endif /* PLAT_mips32_linux */

/* Insert assembly code for other platforms here... 
*/

#endif /* NVALGRIND */


/* ------------------------------------------------------------------ */
/* PLATFORM SPECIFICS for FUNCTION WRAPPING.  This is all very        */
/* ugly.  It's the least-worst tradeoff I can think of.               */
/* ------------------------------------------------------------------ */

/* This section defines magic (a.k.a appalling-hack) macros for doing
   guaranteed-no-redirection macros, so as to get from function
   wrappers to the functions they are wrapping.  The whole point is to
   construct standard call sequences, but to do the call itself with a
   special no-redirect call pseudo-instruction that the JIT
   understands and handles specially.  This section is long and
   repetitious, and I can't see a way to make it shorter.

   The naming scheme is as follows:

      CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}

   'W' stands for "word" and 'v' for "void".  Hence there are
   different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
   and for each, the possibility of returning a word-typed result, or
   no result.
*/

/* Use these to write the name of your wrapper.  NOTE: duplicates
   VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h.  NOTE also: inserts
   the default behaviour equivalence class tag "0000" into the name.
   See pub_tool_redir.h for details -- normally you don't need to
   think about this, though. */

/* Use an extra level of macroisation so as to ensure the soname/fnname
   args are fully macro-expanded before pasting them together. */
#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd

#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
   VG_CONCAT4(_vgw00000ZU_,soname,_,fnname)

#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
   VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname)

/* Use this macro from within a wrapper function to collect the
   context (address and possibly other info) of the original function.
   Once you have that you can then use it in one of the CALL_FN_
   macros.  The type of the argument _lval is OrigFn. */
#define VALGRIND_GET_ORIG_FN(_lval)  VALGRIND_GET_NR_CONTEXT(_lval)

/* Also provide end-user facilities for function replacement, rather
   than wrapping.  A replacement function differs from a wrapper in
   that it has no way to get hold of the original function being
   called, and hence no way to call onwards to it.  In a replacement
   function, VALGRIND_GET_ORIG_FN always returns zero. */

#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname)                 \
   VG_CONCAT4(_vgr00000ZU_,soname,_,fnname)

#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname)                 \
   VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname)

/* Derivatives of the main macros below, for calling functions
   returning void.  Each simply discards the word result via a
   volatile dummy. */

#define CALL_FN_v_v(fnptr)                                        \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_v(_junk,fnptr); } while (0)

#define CALL_FN_v_W(fnptr, arg1)                                  \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_W(_junk,fnptr,arg1); } while (0)

#define CALL_FN_v_WW(fnptr, arg1,arg2)                            \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)

#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3)                      \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)

#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4)                \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)

#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5)             \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)

#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6)        \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)

#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7)   \
   do { volatile unsigned                                         \
781 Once you have that you can then use it in one of the CALL_FN_ 782 macros. The type of the argument _lval is OrigFn. */ 783#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) 784 785/* Also provide end-user facilities for function replacement, rather 786 than wrapping. A replacement function differs from a wrapper in 787 that it has no way to get hold of the original function being 788 called, and hence no way to call onwards to it. In a replacement 789 function, VALGRIND_GET_ORIG_FN always returns zero. */ 790 791#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \ 792 VG_CONCAT4(_vgr00000ZU_,soname,_,fnname) 793 794#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \ 795 VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname) 796 797/* Derivatives of the main macros below, for calling functions 798 returning void. */ 799 800#define CALL_FN_v_v(fnptr) \ 801 do { volatile unsigned long _junk; \ 802 CALL_FN_W_v(_junk,fnptr); } while (0) 803 804#define CALL_FN_v_W(fnptr, arg1) \ 805 do { volatile unsigned long _junk; \ 806 CALL_FN_W_W(_junk,fnptr,arg1); } while (0) 807 808#define CALL_FN_v_WW(fnptr, arg1,arg2) \ 809 do { volatile unsigned long _junk; \ 810 CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0) 811 812#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \ 813 do { volatile unsigned long _junk; \ 814 CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0) 815 816#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \ 817 do { volatile unsigned long _junk; \ 818 CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0) 819 820#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \ 821 do { volatile unsigned long _junk; \ 822 CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0) 823 824#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \ 825 do { volatile unsigned long _junk; \ 826 CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0) 827 828#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ 829 do { volatile unsigned 
long _junk; \ 830 CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0) 831 832/* ------------------------- x86-{linux,darwin} ---------------- */ 833 834#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) 835 836/* These regs are trashed by the hidden call. No need to mention eax 837 as gcc can already see that, plus causes gcc to bomb. */ 838#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx" 839 840/* Macros to save and align the stack before making a function 841 call and restore it afterwards as gcc may not keep the stack 842 pointer aligned if it doesn't realise calls are being made 843 to other functions. */ 844 845#define VALGRIND_ALIGN_STACK \ 846 "movl %%esp,%%edi\n\t" \ 847 "andl $0xfffffff0,%%esp\n\t" 848#define VALGRIND_RESTORE_STACK \ 849 "movl %%edi,%%esp\n\t" 850 851/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned 852 long) == 4. */ 853 854#define CALL_FN_W_v(lval, orig) \ 855 do { \ 856 volatile OrigFn _orig = (orig); \ 857 volatile unsigned long _argvec[1]; \ 858 volatile unsigned long _res; \ 859 _argvec[0] = (unsigned long)_orig.nraddr; \ 860 __asm__ volatile( \ 861 VALGRIND_ALIGN_STACK \ 862 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 863 VALGRIND_CALL_NOREDIR_EAX \ 864 VALGRIND_RESTORE_STACK \ 865 : /*out*/ "=a" (_res) \ 866 : /*in*/ "a" (&_argvec[0]) \ 867 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 868 ); \ 869 lval = (__typeof__(lval)) _res; \ 870 } while (0) 871 872#define CALL_FN_W_W(lval, orig, arg1) \ 873 do { \ 874 volatile OrigFn _orig = (orig); \ 875 volatile unsigned long _argvec[2]; \ 876 volatile unsigned long _res; \ 877 _argvec[0] = (unsigned long)_orig.nraddr; \ 878 _argvec[1] = (unsigned long)(arg1); \ 879 __asm__ volatile( \ 880 VALGRIND_ALIGN_STACK \ 881 "subl $12, %%esp\n\t" \ 882 "pushl 4(%%eax)\n\t" \ 883 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 884 VALGRIND_CALL_NOREDIR_EAX \ 885 VALGRIND_RESTORE_STACK \ 886 : /*out*/ "=a" (_res) \ 887 : /*in*/ "a" 
(&_argvec[0]) \ 888 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 889 ); \ 890 lval = (__typeof__(lval)) _res; \ 891 } while (0) 892 893#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ 894 do { \ 895 volatile OrigFn _orig = (orig); \ 896 volatile unsigned long _argvec[3]; \ 897 volatile unsigned long _res; \ 898 _argvec[0] = (unsigned long)_orig.nraddr; \ 899 _argvec[1] = (unsigned long)(arg1); \ 900 _argvec[2] = (unsigned long)(arg2); \ 901 __asm__ volatile( \ 902 VALGRIND_ALIGN_STACK \ 903 "subl $8, %%esp\n\t" \ 904 "pushl 8(%%eax)\n\t" \ 905 "pushl 4(%%eax)\n\t" \ 906 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 907 VALGRIND_CALL_NOREDIR_EAX \ 908 VALGRIND_RESTORE_STACK \ 909 : /*out*/ "=a" (_res) \ 910 : /*in*/ "a" (&_argvec[0]) \ 911 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 912 ); \ 913 lval = (__typeof__(lval)) _res; \ 914 } while (0) 915 916#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ 917 do { \ 918 volatile OrigFn _orig = (orig); \ 919 volatile unsigned long _argvec[4]; \ 920 volatile unsigned long _res; \ 921 _argvec[0] = (unsigned long)_orig.nraddr; \ 922 _argvec[1] = (unsigned long)(arg1); \ 923 _argvec[2] = (unsigned long)(arg2); \ 924 _argvec[3] = (unsigned long)(arg3); \ 925 __asm__ volatile( \ 926 VALGRIND_ALIGN_STACK \ 927 "subl $4, %%esp\n\t" \ 928 "pushl 12(%%eax)\n\t" \ 929 "pushl 8(%%eax)\n\t" \ 930 "pushl 4(%%eax)\n\t" \ 931 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 932 VALGRIND_CALL_NOREDIR_EAX \ 933 VALGRIND_RESTORE_STACK \ 934 : /*out*/ "=a" (_res) \ 935 : /*in*/ "a" (&_argvec[0]) \ 936 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 937 ); \ 938 lval = (__typeof__(lval)) _res; \ 939 } while (0) 940 941#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ 942 do { \ 943 volatile OrigFn _orig = (orig); \ 944 volatile unsigned long _argvec[5]; \ 945 volatile unsigned long _res; \ 946 _argvec[0] = (unsigned long)_orig.nraddr; \ 947 _argvec[1] = (unsigned long)(arg1); \ 948 _argvec[2] = 
(unsigned long)(arg2); \ 949 _argvec[3] = (unsigned long)(arg3); \ 950 _argvec[4] = (unsigned long)(arg4); \ 951 __asm__ volatile( \ 952 VALGRIND_ALIGN_STACK \ 953 "pushl 16(%%eax)\n\t" \ 954 "pushl 12(%%eax)\n\t" \ 955 "pushl 8(%%eax)\n\t" \ 956 "pushl 4(%%eax)\n\t" \ 957 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 958 VALGRIND_CALL_NOREDIR_EAX \ 959 VALGRIND_RESTORE_STACK \ 960 : /*out*/ "=a" (_res) \ 961 : /*in*/ "a" (&_argvec[0]) \ 962 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 963 ); \ 964 lval = (__typeof__(lval)) _res; \ 965 } while (0) 966 967#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ 968 do { \ 969 volatile OrigFn _orig = (orig); \ 970 volatile unsigned long _argvec[6]; \ 971 volatile unsigned long _res; \ 972 _argvec[0] = (unsigned long)_orig.nraddr; \ 973 _argvec[1] = (unsigned long)(arg1); \ 974 _argvec[2] = (unsigned long)(arg2); \ 975 _argvec[3] = (unsigned long)(arg3); \ 976 _argvec[4] = (unsigned long)(arg4); \ 977 _argvec[5] = (unsigned long)(arg5); \ 978 __asm__ volatile( \ 979 VALGRIND_ALIGN_STACK \ 980 "subl $12, %%esp\n\t" \ 981 "pushl 20(%%eax)\n\t" \ 982 "pushl 16(%%eax)\n\t" \ 983 "pushl 12(%%eax)\n\t" \ 984 "pushl 8(%%eax)\n\t" \ 985 "pushl 4(%%eax)\n\t" \ 986 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 987 VALGRIND_CALL_NOREDIR_EAX \ 988 VALGRIND_RESTORE_STACK \ 989 : /*out*/ "=a" (_res) \ 990 : /*in*/ "a" (&_argvec[0]) \ 991 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 992 ); \ 993 lval = (__typeof__(lval)) _res; \ 994 } while (0) 995 996#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ 997 do { \ 998 volatile OrigFn _orig = (orig); \ 999 volatile unsigned long _argvec[7]; \ 1000 volatile unsigned long _res; \ 1001 _argvec[0] = (unsigned long)_orig.nraddr; \ 1002 _argvec[1] = (unsigned long)(arg1); \ 1003 _argvec[2] = (unsigned long)(arg2); \ 1004 _argvec[3] = (unsigned long)(arg3); \ 1005 _argvec[4] = (unsigned long)(arg4); \ 1006 _argvec[5] = (unsigned long)(arg5); \ 1007 
_argvec[6] = (unsigned long)(arg6); \ 1008 __asm__ volatile( \ 1009 VALGRIND_ALIGN_STACK \ 1010 "subl $8, %%esp\n\t" \ 1011 "pushl 24(%%eax)\n\t" \ 1012 "pushl 20(%%eax)\n\t" \ 1013 "pushl 16(%%eax)\n\t" \ 1014 "pushl 12(%%eax)\n\t" \ 1015 "pushl 8(%%eax)\n\t" \ 1016 "pushl 4(%%eax)\n\t" \ 1017 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 1018 VALGRIND_CALL_NOREDIR_EAX \ 1019 VALGRIND_RESTORE_STACK \ 1020 : /*out*/ "=a" (_res) \ 1021 : /*in*/ "a" (&_argvec[0]) \ 1022 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 1023 ); \ 1024 lval = (__typeof__(lval)) _res; \ 1025 } while (0) 1026 1027#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 1028 arg7) \ 1029 do { \ 1030 volatile OrigFn _orig = (orig); \ 1031 volatile unsigned long _argvec[8]; \ 1032 volatile unsigned long _res; \ 1033 _argvec[0] = (unsigned long)_orig.nraddr; \ 1034 _argvec[1] = (unsigned long)(arg1); \ 1035 _argvec[2] = (unsigned long)(arg2); \ 1036 _argvec[3] = (unsigned long)(arg3); \ 1037 _argvec[4] = (unsigned long)(arg4); \ 1038 _argvec[5] = (unsigned long)(arg5); \ 1039 _argvec[6] = (unsigned long)(arg6); \ 1040 _argvec[7] = (unsigned long)(arg7); \ 1041 __asm__ volatile( \ 1042 VALGRIND_ALIGN_STACK \ 1043 "subl $4, %%esp\n\t" \ 1044 "pushl 28(%%eax)\n\t" \ 1045 "pushl 24(%%eax)\n\t" \ 1046 "pushl 20(%%eax)\n\t" \ 1047 "pushl 16(%%eax)\n\t" \ 1048 "pushl 12(%%eax)\n\t" \ 1049 "pushl 8(%%eax)\n\t" \ 1050 "pushl 4(%%eax)\n\t" \ 1051 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 1052 VALGRIND_CALL_NOREDIR_EAX \ 1053 VALGRIND_RESTORE_STACK \ 1054 : /*out*/ "=a" (_res) \ 1055 : /*in*/ "a" (&_argvec[0]) \ 1056 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 1057 ); \ 1058 lval = (__typeof__(lval)) _res; \ 1059 } while (0) 1060 1061#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 1062 arg7,arg8) \ 1063 do { \ 1064 volatile OrigFn _orig = (orig); \ 1065 volatile unsigned long _argvec[9]; \ 1066 volatile unsigned long _res; \ 1067 _argvec[0] = 
(unsigned long)_orig.nraddr; \ 1068 _argvec[1] = (unsigned long)(arg1); \ 1069 _argvec[2] = (unsigned long)(arg2); \ 1070 _argvec[3] = (unsigned long)(arg3); \ 1071 _argvec[4] = (unsigned long)(arg4); \ 1072 _argvec[5] = (unsigned long)(arg5); \ 1073 _argvec[6] = (unsigned long)(arg6); \ 1074 _argvec[7] = (unsigned long)(arg7); \ 1075 _argvec[8] = (unsigned long)(arg8); \ 1076 __asm__ volatile( \ 1077 VALGRIND_ALIGN_STACK \ 1078 "pushl 32(%%eax)\n\t" \ 1079 "pushl 28(%%eax)\n\t" \ 1080 "pushl 24(%%eax)\n\t" \ 1081 "pushl 20(%%eax)\n\t" \ 1082 "pushl 16(%%eax)\n\t" \ 1083 "pushl 12(%%eax)\n\t" \ 1084 "pushl 8(%%eax)\n\t" \ 1085 "pushl 4(%%eax)\n\t" \ 1086 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 1087 VALGRIND_CALL_NOREDIR_EAX \ 1088 VALGRIND_RESTORE_STACK \ 1089 : /*out*/ "=a" (_res) \ 1090 : /*in*/ "a" (&_argvec[0]) \ 1091 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 1092 ); \ 1093 lval = (__typeof__(lval)) _res; \ 1094 } while (0) 1095 1096#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 1097 arg7,arg8,arg9) \ 1098 do { \ 1099 volatile OrigFn _orig = (orig); \ 1100 volatile unsigned long _argvec[10]; \ 1101 volatile unsigned long _res; \ 1102 _argvec[0] = (unsigned long)_orig.nraddr; \ 1103 _argvec[1] = (unsigned long)(arg1); \ 1104 _argvec[2] = (unsigned long)(arg2); \ 1105 _argvec[3] = (unsigned long)(arg3); \ 1106 _argvec[4] = (unsigned long)(arg4); \ 1107 _argvec[5] = (unsigned long)(arg5); \ 1108 _argvec[6] = (unsigned long)(arg6); \ 1109 _argvec[7] = (unsigned long)(arg7); \ 1110 _argvec[8] = (unsigned long)(arg8); \ 1111 _argvec[9] = (unsigned long)(arg9); \ 1112 __asm__ volatile( \ 1113 VALGRIND_ALIGN_STACK \ 1114 "subl $12, %%esp\n\t" \ 1115 "pushl 36(%%eax)\n\t" \ 1116 "pushl 32(%%eax)\n\t" \ 1117 "pushl 28(%%eax)\n\t" \ 1118 "pushl 24(%%eax)\n\t" \ 1119 "pushl 20(%%eax)\n\t" \ 1120 "pushl 16(%%eax)\n\t" \ 1121 "pushl 12(%%eax)\n\t" \ 1122 "pushl 8(%%eax)\n\t" \ 1123 "pushl 4(%%eax)\n\t" \ 1124 "movl (%%eax), %%eax\n\t" 
/* target->%eax */ \ 1125 VALGRIND_CALL_NOREDIR_EAX \ 1126 VALGRIND_RESTORE_STACK \ 1127 : /*out*/ "=a" (_res) \ 1128 : /*in*/ "a" (&_argvec[0]) \ 1129 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 1130 ); \ 1131 lval = (__typeof__(lval)) _res; \ 1132 } while (0) 1133 1134#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 1135 arg7,arg8,arg9,arg10) \ 1136 do { \ 1137 volatile OrigFn _orig = (orig); \ 1138 volatile unsigned long _argvec[11]; \ 1139 volatile unsigned long _res; \ 1140 _argvec[0] = (unsigned long)_orig.nraddr; \ 1141 _argvec[1] = (unsigned long)(arg1); \ 1142 _argvec[2] = (unsigned long)(arg2); \ 1143 _argvec[3] = (unsigned long)(arg3); \ 1144 _argvec[4] = (unsigned long)(arg4); \ 1145 _argvec[5] = (unsigned long)(arg5); \ 1146 _argvec[6] = (unsigned long)(arg6); \ 1147 _argvec[7] = (unsigned long)(arg7); \ 1148 _argvec[8] = (unsigned long)(arg8); \ 1149 _argvec[9] = (unsigned long)(arg9); \ 1150 _argvec[10] = (unsigned long)(arg10); \ 1151 __asm__ volatile( \ 1152 VALGRIND_ALIGN_STACK \ 1153 "subl $8, %%esp\n\t" \ 1154 "pushl 40(%%eax)\n\t" \ 1155 "pushl 36(%%eax)\n\t" \ 1156 "pushl 32(%%eax)\n\t" \ 1157 "pushl 28(%%eax)\n\t" \ 1158 "pushl 24(%%eax)\n\t" \ 1159 "pushl 20(%%eax)\n\t" \ 1160 "pushl 16(%%eax)\n\t" \ 1161 "pushl 12(%%eax)\n\t" \ 1162 "pushl 8(%%eax)\n\t" \ 1163 "pushl 4(%%eax)\n\t" \ 1164 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 1165 VALGRIND_CALL_NOREDIR_EAX \ 1166 VALGRIND_RESTORE_STACK \ 1167 : /*out*/ "=a" (_res) \ 1168 : /*in*/ "a" (&_argvec[0]) \ 1169 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 1170 ); \ 1171 lval = (__typeof__(lval)) _res; \ 1172 } while (0) 1173 1174#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ 1175 arg6,arg7,arg8,arg9,arg10, \ 1176 arg11) \ 1177 do { \ 1178 volatile OrigFn _orig = (orig); \ 1179 volatile unsigned long _argvec[12]; \ 1180 volatile unsigned long _res; \ 1181 _argvec[0] = (unsigned long)_orig.nraddr; \ 1182 _argvec[1] = (unsigned 
long)(arg1); \ 1183 _argvec[2] = (unsigned long)(arg2); \ 1184 _argvec[3] = (unsigned long)(arg3); \ 1185 _argvec[4] = (unsigned long)(arg4); \ 1186 _argvec[5] = (unsigned long)(arg5); \ 1187 _argvec[6] = (unsigned long)(arg6); \ 1188 _argvec[7] = (unsigned long)(arg7); \ 1189 _argvec[8] = (unsigned long)(arg8); \ 1190 _argvec[9] = (unsigned long)(arg9); \ 1191 _argvec[10] = (unsigned long)(arg10); \ 1192 _argvec[11] = (unsigned long)(arg11); \ 1193 __asm__ volatile( \ 1194 VALGRIND_ALIGN_STACK \ 1195 "subl $4, %%esp\n\t" \ 1196 "pushl 44(%%eax)\n\t" \ 1197 "pushl 40(%%eax)\n\t" \ 1198 "pushl 36(%%eax)\n\t" \ 1199 "pushl 32(%%eax)\n\t" \ 1200 "pushl 28(%%eax)\n\t" \ 1201 "pushl 24(%%eax)\n\t" \ 1202 "pushl 20(%%eax)\n\t" \ 1203 "pushl 16(%%eax)\n\t" \ 1204 "pushl 12(%%eax)\n\t" \ 1205 "pushl 8(%%eax)\n\t" \ 1206 "pushl 4(%%eax)\n\t" \ 1207 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 1208 VALGRIND_CALL_NOREDIR_EAX \ 1209 VALGRIND_RESTORE_STACK \ 1210 : /*out*/ "=a" (_res) \ 1211 : /*in*/ "a" (&_argvec[0]) \ 1212 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 1213 ); \ 1214 lval = (__typeof__(lval)) _res; \ 1215 } while (0) 1216 1217#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ 1218 arg6,arg7,arg8,arg9,arg10, \ 1219 arg11,arg12) \ 1220 do { \ 1221 volatile OrigFn _orig = (orig); \ 1222 volatile unsigned long _argvec[13]; \ 1223 volatile unsigned long _res; \ 1224 _argvec[0] = (unsigned long)_orig.nraddr; \ 1225 _argvec[1] = (unsigned long)(arg1); \ 1226 _argvec[2] = (unsigned long)(arg2); \ 1227 _argvec[3] = (unsigned long)(arg3); \ 1228 _argvec[4] = (unsigned long)(arg4); \ 1229 _argvec[5] = (unsigned long)(arg5); \ 1230 _argvec[6] = (unsigned long)(arg6); \ 1231 _argvec[7] = (unsigned long)(arg7); \ 1232 _argvec[8] = (unsigned long)(arg8); \ 1233 _argvec[9] = (unsigned long)(arg9); \ 1234 _argvec[10] = (unsigned long)(arg10); \ 1235 _argvec[11] = (unsigned long)(arg11); \ 1236 _argvec[12] = (unsigned long)(arg12); \ 1237 __asm__ 
volatile( \ 1238 VALGRIND_ALIGN_STACK \ 1239 "pushl 48(%%eax)\n\t" \ 1240 "pushl 44(%%eax)\n\t" \ 1241 "pushl 40(%%eax)\n\t" \ 1242 "pushl 36(%%eax)\n\t" \ 1243 "pushl 32(%%eax)\n\t" \ 1244 "pushl 28(%%eax)\n\t" \ 1245 "pushl 24(%%eax)\n\t" \ 1246 "pushl 20(%%eax)\n\t" \ 1247 "pushl 16(%%eax)\n\t" \ 1248 "pushl 12(%%eax)\n\t" \ 1249 "pushl 8(%%eax)\n\t" \ 1250 "pushl 4(%%eax)\n\t" \ 1251 "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 1252 VALGRIND_CALL_NOREDIR_EAX \ 1253 VALGRIND_RESTORE_STACK \ 1254 : /*out*/ "=a" (_res) \ 1255 : /*in*/ "a" (&_argvec[0]) \ 1256 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ 1257 ); \ 1258 lval = (__typeof__(lval)) _res; \ 1259 } while (0) 1260 1261#endif /* PLAT_x86_linux || PLAT_x86_darwin */ 1262 1263/* ------------------------ amd64-{linux,darwin} --------------- */ 1264 1265#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) 1266 1267/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ 1268 1269/* These regs are trashed by the hidden call. */ 1270#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \ 1271 "rdi", "r8", "r9", "r10", "r11" 1272 1273/* This is all pretty complex. It's so as to make stack unwinding 1274 work reliably. See bug 243270. The basic problem is the sub and 1275 add of 128 of %rsp in all of the following macros. If gcc believes 1276 the CFA is in %rsp, then unwinding may fail, because what's at the 1277 CFA is not what gcc "expected" when it constructs the CFIs for the 1278 places where the macros are instantiated. 1279 1280 But we can't just add a CFI annotation to increase the CFA offset 1281 by 128, to match the sub of 128 from %rsp, because we don't know 1282 whether gcc has chosen %rsp as the CFA at that point, or whether it 1283 has chosen some other register (eg, %rbp). In the latter case, 1284 adding a CFI annotation to change the CFA offset is simply wrong. 
1285 1286 So the solution is to get hold of the CFA using 1287 __builtin_dwarf_cfa(), put it in a known register, and add a 1288 CFI annotation to say what the register is. We choose %rbp for 1289 this (perhaps perversely), because: 1290 1291 (1) %rbp is already subject to unwinding. If a new register was 1292 chosen then the unwinder would have to unwind it in all stack 1293 traces, which is expensive, and 1294 1295 (2) %rbp is already subject to precise exception updates in the 1296 JIT. If a new register was chosen, we'd have to have precise 1297 exceptions for it too, which reduces performance of the 1298 generated code. 1299 1300 However .. one extra complication. We can't just whack the result 1301 of __builtin_dwarf_cfa() into %rbp and then add %rbp to the 1302 list of trashed registers at the end of the inline assembly 1303 fragments; gcc won't allow %rbp to appear in that list. Hence 1304 instead we need to stash %rbp in %r15 for the duration of the asm, 1305 and say that %r15 is trashed instead. gcc seems happy to go with 1306 that. 1307 1308 Oh .. and this all needs to be conditionalised so that it is 1309 unchanged from before this commit, when compiled with older gccs 1310 that don't support __builtin_dwarf_cfa. Furthermore, since 1311 this header file is freestanding, it has to be independent of 1312 config.h, and so the following conditionalisation cannot depend on 1313 configure time checks. 1314 1315 Although it's not clear from 1316 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)', 1317 this expression excludes Darwin. 1318 .cfi directives in Darwin assembly appear to be completely 1319 different and I haven't investigated how they work. 1320 1321 For even more entertainment value, note we have to use the 1322 completely undocumented __builtin_dwarf_cfa(), which appears to 1323 really compute the CFA, whereas __builtin_frame_address(0) claims 1324 to but actually doesn't. 
See 1325 https://bugs.kde.org/show_bug.cgi?id=243270#c47 1326*/ 1327#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) 1328# define __FRAME_POINTER \ 1329 ,"r"(__builtin_dwarf_cfa()) 1330# define VALGRIND_CFI_PROLOGUE \ 1331 "movq %%rbp, %%r15\n\t" \ 1332 "movq %2, %%rbp\n\t" \ 1333 ".cfi_remember_state\n\t" \ 1334 ".cfi_def_cfa rbp, 0\n\t" 1335# define VALGRIND_CFI_EPILOGUE \ 1336 "movq %%r15, %%rbp\n\t" \ 1337 ".cfi_restore_state\n\t" 1338#else 1339# define __FRAME_POINTER 1340# define VALGRIND_CFI_PROLOGUE 1341# define VALGRIND_CFI_EPILOGUE 1342#endif 1343 1344/* Macros to save and align the stack before making a function 1345 call and restore it afterwards as gcc may not keep the stack 1346 pointer aligned if it doesn't realise calls are being made 1347 to other functions. */ 1348 1349#define VALGRIND_ALIGN_STACK \ 1350 "movq %%rsp,%%r14\n\t" \ 1351 "andq $0xfffffffffffffff0,%%rsp\n\t" 1352#define VALGRIND_RESTORE_STACK \ 1353 "movq %%r14,%%rsp\n\t" 1354 1355/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned 1356 long) == 8. */ 1357 1358/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_ 1359 macros. In order not to trash the stack redzone, we need to drop 1360 %rsp by 128 before the hidden call, and restore afterwards. The 1361 nastyness is that it is only by luck that the stack still appears 1362 to be unwindable during the hidden call - since then the behaviour 1363 of any routine using this macro does not match what the CFI data 1364 says. Sigh. 1365 1366 Why is this important? Imagine that a wrapper has a stack 1367 allocated local, and passes to the hidden call, a pointer to it. 1368 Because gcc does not know about the hidden call, it may allocate 1369 that local in the redzone. Unfortunately the hidden call may then 1370 trash it before it comes to use it. So we must step clear of the 1371 redzone, for the duration of the hidden call, to make it safe. 
1372 1373 Probably the same problem afflicts the other redzone-style ABIs too 1374 (ppc64-linux); but for those, the stack is 1375 self describing (none of this CFI nonsense) so at least messing 1376 with the stack pointer doesn't give a danger of non-unwindable 1377 stack. */ 1378 1379#define CALL_FN_W_v(lval, orig) \ 1380 do { \ 1381 volatile OrigFn _orig = (orig); \ 1382 volatile unsigned long _argvec[1]; \ 1383 volatile unsigned long _res; \ 1384 _argvec[0] = (unsigned long)_orig.nraddr; \ 1385 __asm__ volatile( \ 1386 VALGRIND_CFI_PROLOGUE \ 1387 VALGRIND_ALIGN_STACK \ 1388 "subq $128,%%rsp\n\t" \ 1389 "movq (%%rax), %%rax\n\t" /* target->%rax */ \ 1390 VALGRIND_CALL_NOREDIR_RAX \ 1391 VALGRIND_RESTORE_STACK \ 1392 VALGRIND_CFI_EPILOGUE \ 1393 : /*out*/ "=a" (_res) \ 1394 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 1395 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ 1396 ); \ 1397 lval = (__typeof__(lval)) _res; \ 1398 } while (0) 1399 1400#define CALL_FN_W_W(lval, orig, arg1) \ 1401 do { \ 1402 volatile OrigFn _orig = (orig); \ 1403 volatile unsigned long _argvec[2]; \ 1404 volatile unsigned long _res; \ 1405 _argvec[0] = (unsigned long)_orig.nraddr; \ 1406 _argvec[1] = (unsigned long)(arg1); \ 1407 __asm__ volatile( \ 1408 VALGRIND_CFI_PROLOGUE \ 1409 VALGRIND_ALIGN_STACK \ 1410 "subq $128,%%rsp\n\t" \ 1411 "movq 8(%%rax), %%rdi\n\t" \ 1412 "movq (%%rax), %%rax\n\t" /* target->%rax */ \ 1413 VALGRIND_CALL_NOREDIR_RAX \ 1414 VALGRIND_RESTORE_STACK \ 1415 VALGRIND_CFI_EPILOGUE \ 1416 : /*out*/ "=a" (_res) \ 1417 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 1418 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ 1419 ); \ 1420 lval = (__typeof__(lval)) _res; \ 1421 } while (0) 1422 1423#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ 1424 do { \ 1425 volatile OrigFn _orig = (orig); \ 1426 volatile unsigned long _argvec[3]; \ 1427 volatile unsigned long _res; \ 1428 _argvec[0] = (unsigned long)_orig.nraddr; \ 1429 _argvec[1] = 
(unsigned long)(arg1); \ 1430 _argvec[2] = (unsigned long)(arg2); \ 1431 __asm__ volatile( \ 1432 VALGRIND_CFI_PROLOGUE \ 1433 VALGRIND_ALIGN_STACK \ 1434 "subq $128,%%rsp\n\t" \ 1435 "movq 16(%%rax), %%rsi\n\t" \ 1436 "movq 8(%%rax), %%rdi\n\t" \ 1437 "movq (%%rax), %%rax\n\t" /* target->%rax */ \ 1438 VALGRIND_CALL_NOREDIR_RAX \ 1439 VALGRIND_RESTORE_STACK \ 1440 VALGRIND_CFI_EPILOGUE \ 1441 : /*out*/ "=a" (_res) \ 1442 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 1443 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ 1444 ); \ 1445 lval = (__typeof__(lval)) _res; \ 1446 } while (0) 1447 1448#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ 1449 do { \ 1450 volatile OrigFn _orig = (orig); \ 1451 volatile unsigned long _argvec[4]; \ 1452 volatile unsigned long _res; \ 1453 _argvec[0] = (unsigned long)_orig.nraddr; \ 1454 _argvec[1] = (unsigned long)(arg1); \ 1455 _argvec[2] = (unsigned long)(arg2); \ 1456 _argvec[3] = (unsigned long)(arg3); \ 1457 __asm__ volatile( \ 1458 VALGRIND_CFI_PROLOGUE \ 1459 VALGRIND_ALIGN_STACK \ 1460 "subq $128,%%rsp\n\t" \ 1461 "movq 24(%%rax), %%rdx\n\t" \ 1462 "movq 16(%%rax), %%rsi\n\t" \ 1463 "movq 8(%%rax), %%rdi\n\t" \ 1464 "movq (%%rax), %%rax\n\t" /* target->%rax */ \ 1465 VALGRIND_CALL_NOREDIR_RAX \ 1466 VALGRIND_RESTORE_STACK \ 1467 VALGRIND_CFI_EPILOGUE \ 1468 : /*out*/ "=a" (_res) \ 1469 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 1470 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ 1471 ); \ 1472 lval = (__typeof__(lval)) _res; \ 1473 } while (0) 1474 1475#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ 1476 do { \ 1477 volatile OrigFn _orig = (orig); \ 1478 volatile unsigned long _argvec[5]; \ 1479 volatile unsigned long _res; \ 1480 _argvec[0] = (unsigned long)_orig.nraddr; \ 1481 _argvec[1] = (unsigned long)(arg1); \ 1482 _argvec[2] = (unsigned long)(arg2); \ 1483 _argvec[3] = (unsigned long)(arg3); \ 1484 _argvec[4] = (unsigned long)(arg4); \ 1485 __asm__ volatile( \ 1486 
         VALGRIND_CFI_PROLOGUE                                    \
         VALGRIND_ALIGN_STACK                                     \
         "subq $128,%%rsp\n\t"                                    \
         "movq 32(%%rax), %%rcx\n\t"                              \
         "movq 24(%%rax), %%rdx\n\t"                              \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         VALGRIND_RESTORE_STACK                                   \
         VALGRIND_CFI_EPILOGUE                                    \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* Call an original (non-redirected) 5-arg function returning a word.
   The target address sits in _argvec[0]; args 1..5 are loaded into
   rdi/rsi/rdx/rcx/r8 from _argvec[1..5].  NOTE(review): the
   "subq $128" appears to step over the 128-byte region below %rsp
   (red zone) before the call -- confirm against the earlier comments
   in this file. */
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[6];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      __asm__ volatile(                                           \
         VALGRIND_CFI_PROLOGUE                                    \
         VALGRIND_ALIGN_STACK                                     \
         "subq $128,%%rsp\n\t"                                    \
         "movq 40(%%rax), %%r8\n\t"                               \
         "movq 32(%%rax), %%rcx\n\t"                              \
         "movq 24(%%rax), %%rdx\n\t"                              \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         VALGRIND_RESTORE_STACK                                   \
         VALGRIND_CFI_EPILOGUE                                    \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 6-arg variant: adds r9 (arg6); all six integer args fit in
   registers, nothing goes on the stack. */
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[7];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      __asm__ volatile(                                           \
         VALGRIND_CFI_PROLOGUE                                    \
         VALGRIND_ALIGN_STACK                                     \
         "subq $128,%%rsp\n\t"                                    \
         "movq 48(%%rax), %%r9\n\t"                               \
         "movq 40(%%rax), %%r8\n\t"                               \
         "movq 32(%%rax), %%rcx\n\t"                              \
         "movq 24(%%rax), %%rdx\n\t"                              \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         VALGRIND_RESTORE_STACK                                   \
         VALGRIND_CFI_EPILOGUE                                    \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 7-arg variant: arg7 is pushed on the stack.  136 (not 128) is
   subtracted first so that after the single 8-byte push the stack
   stays 16-byte aligned at the call. */
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7)                                        \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[8];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      __asm__ volatile(                                           \
         VALGRIND_CFI_PROLOGUE                                    \
         VALGRIND_ALIGN_STACK                                     \
         "subq $136,%%rsp\n\t"                                    \
         "pushq 56(%%rax)\n\t"                                    \
         "movq 48(%%rax), %%r9\n\t"                               \
         "movq 40(%%rax), %%r8\n\t"                               \
         "movq 32(%%rax), %%rcx\n\t"                              \
         "movq 24(%%rax), %%rdx\n\t"                              \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         VALGRIND_RESTORE_STACK                                   \
         VALGRIND_CFI_EPILOGUE                                    \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 8-arg variant: arg7 and arg8 go on the stack, pushed last-first
   (two pushes, so back to the 128-byte adjustment). */
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7,arg8)                                   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[9];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      __asm__ volatile(                                           \
         VALGRIND_CFI_PROLOGUE                                    \
         VALGRIND_ALIGN_STACK                                     \
         "subq $128,%%rsp\n\t"                                    \
         "pushq 64(%%rax)\n\t"                                    \
         "pushq 56(%%rax)\n\t"                                    \
         "movq 48(%%rax), %%r9\n\t"                               \
         "movq 40(%%rax), %%r8\n\t"                               \
         "movq 32(%%rax), %%rcx\n\t"                              \
         "movq 24(%%rax), %%rdx\n\t"                              \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         VALGRIND_RESTORE_STACK                                   \
         VALGRIND_CFI_EPILOGUE                                    \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 9-arg variant: arg7..arg9 on the stack. */
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7,arg8,arg9)                              \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[10];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      __asm__ volatile(                                           \
         VALGRIND_CFI_PROLOGUE                                    \
         VALGRIND_ALIGN_STACK                                     \
         "subq $136,%%rsp\n\t"                                    \
         "pushq 72(%%rax)\n\t"                                    \
         "pushq 64(%%rax)\n\t"                                    \
         "pushq 56(%%rax)\n\t"                                    \
         "movq 48(%%rax), %%r9\n\t"                               \
         "movq 40(%%rax), %%r8\n\t"                               \
         "movq 32(%%rax), %%rcx\n\t"                              \
         "movq 24(%%rax), %%rdx\n\t"                              \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         VALGRIND_RESTORE_STACK                                   \
         VALGRIND_CFI_EPILOGUE                                    \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 10-arg variant: arg7..arg10 on the stack (four pushes, 128-byte
   adjustment keeps 16-byte alignment). */
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
                      arg7,arg8,arg9,arg10)                       \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[11];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      _argvec[10] = (unsigned long)(arg10);                       \
      __asm__ volatile(                                           \
         VALGRIND_CFI_PROLOGUE                                    \
         VALGRIND_ALIGN_STACK                                     \
         "subq $128,%%rsp\n\t"                                    \
         "pushq 80(%%rax)\n\t"                                    \
         "pushq 72(%%rax)\n\t"                                    \
         "pushq 64(%%rax)\n\t"                                    \
         "pushq 56(%%rax)\n\t"                                    \
         "movq 48(%%rax), %%r9\n\t"                               \
         "movq 40(%%rax), %%r8\n\t"                               \
         "movq 32(%%rax), %%rcx\n\t"                              \
         "movq 24(%%rax), %%rdx\n\t"                              \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         VALGRIND_RESTORE_STACK                                   \
         VALGRIND_CFI_EPILOGUE                                    \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 11-arg variant: arg7..arg11 on the stack (five pushes, 136-byte
   adjustment). */
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
                      arg7,arg8,arg9,arg10,arg11)                 \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[12];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      _argvec[10] = (unsigned long)(arg10);                       \
      _argvec[11] = (unsigned long)(arg11);                       \
      __asm__ volatile(                                           \
         VALGRIND_CFI_PROLOGUE                                    \
         VALGRIND_ALIGN_STACK                                     \
         "subq $136,%%rsp\n\t"                                    \
         "pushq 88(%%rax)\n\t"                                    \
         "pushq 80(%%rax)\n\t"                                    \
         "pushq 72(%%rax)\n\t"                                    \
         "pushq 64(%%rax)\n\t"                                    \
         "pushq 56(%%rax)\n\t"                                    \
         "movq 48(%%rax), %%r9\n\t"                               \
         "movq 40(%%rax), %%r8\n\t"                               \
         "movq 32(%%rax), %%rcx\n\t"                              \
         "movq 24(%%rax), %%rdx\n\t"                              \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         VALGRIND_RESTORE_STACK                                   \
         VALGRIND_CFI_EPILOGUE                                    \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 12-arg variant: arg7..arg12 on the stack (six pushes, 128-byte
   adjustment). */
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
                      arg7,arg8,arg9,arg10,arg11,arg12)           \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[13];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      _argvec[10] = (unsigned long)(arg10);                       \
      _argvec[11] = (unsigned long)(arg11);                       \
      _argvec[12] = (unsigned long)(arg12);                       \
      __asm__ volatile(                                           \
         VALGRIND_CFI_PROLOGUE                                    \
         VALGRIND_ALIGN_STACK                                     \
         "subq $128,%%rsp\n\t"                                    \
         "pushq 96(%%rax)\n\t"                                    \
         "pushq 88(%%rax)\n\t"                                    \
         "pushq 80(%%rax)\n\t"                                    \
         "pushq 72(%%rax)\n\t"                                    \
         "pushq 64(%%rax)\n\t"                                    \
         "pushq 56(%%rax)\n\t"                                    \
         "movq 48(%%rax), %%r9\n\t"                               \
         "movq 40(%%rax), %%r8\n\t"                               \
         "movq 32(%%rax), %%rcx\n\t"                              \
         "movq 24(%%rax), %%rdx\n\t"                              \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         VALGRIND_RESTORE_STACK                                   \
         VALGRIND_CFI_EPILOGUE                                    \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */

/* ------------------------ ppc32-linux ------------------------ */

#if defined(PLAT_ppc32_linux)

/* This is useful for finding out about the on-stack stuff:

   extern int f9 ( 
int,int,int,int,int,int,int,int,int );
   extern int f10 ( int,int,int,int,int,int,int,int,int,int );
   extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
   extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );

   int g9 ( void ) {
      return f9(11,22,33,44,55,66,77,88,99);
   }
   int g10 ( void ) {
      return f10(11,22,33,44,55,66,77,88,99,110);
   }
   int g11 ( void ) {
      return f11(11,22,33,44,55,66,77,88,99,110,121);
   }
   int g12 ( void ) {
      return f12(11,22,33,44,55,66,77,88,99,110,121,132);
   }
*/

/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS                                       \
   "lr", "ctr", "xer",                                            \
   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
   "r11", "r12", "r13"

/* Macros to save and align the stack before making a function
   call and restore it afterwards as gcc may not keep the stack
   pointer aligned if it doesn't realise calls are being made
   to other functions. */

/* r28 holds the caller's stack pointer across the call; the rlwinm
   clears the low 4 bits of r1, i.e. rounds r1 down to a 16-byte
   boundary. */
#define VALGRIND_ALIGN_STACK       \
   "mr 28,1\n\t"                   \
   "rlwinm 1,1,0,0,27\n\t"
#define VALGRIND_RESTORE_STACK     \
   "mr 1,28\n\t"

/* These CALL_FN_ macros assume that on ppc32-linux,
   sizeof(unsigned long) == 4. */

/* Call an original 0-arg function: load the target from _argvec[0]
   into r11 and branch-and-link; result comes back in r3. */
#define CALL_FN_W_v(lval, orig)                                   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[1];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 1 arg: arg1 goes in r3. */
#define CALL_FN_W_W(lval, orig, arg1)                             \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[2];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 2 args: r3, r4. */
#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 3 args: r3..r5. */
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[4];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 4 args: r3..r6. */
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[5];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      _argvec[4] = (unsigned long)arg4;                           \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 5 args: r3..r7. */
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[6];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      _argvec[4] = (unsigned long)arg4;                           \
      _argvec[5] = (unsigned long)arg5;                           \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
         "lwz 7,20(11)\n\t"                                       \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 6 args: r3..r8. */
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[7];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      _argvec[4] = (unsigned long)arg4;                           \
      _argvec[5] = (unsigned long)arg5;                           \
      _argvec[6] = (unsigned long)arg6;                           \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
         "lwz 7,20(11)\n\t"                                       \
         "lwz 8,24(11)\n\t"                                       \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 7 args: r3..r9. */
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7)                                        \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[8];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      _argvec[4] = (unsigned long)arg4;                           \
      _argvec[5] = (unsigned long)arg5;                           \
      _argvec[6] = (unsigned long)arg6;                           \
      _argvec[7] = (unsigned long)arg7;                           \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
         "lwz 7,20(11)\n\t"                                       \
         "lwz 8,24(11)\n\t"                                       \
         "lwz 9,28(11)\n\t"                                       \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 8 args: r3..r10 -- all register args used, nothing on the stack. */
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7,arg8)                                   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[9];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      _argvec[4] = (unsigned long)arg4;                           \
      _argvec[5] = (unsigned long)arg5;                           \
      _argvec[6] = (unsigned long)arg6;                           \
      _argvec[7] = (unsigned long)arg7;                           \
      _argvec[8] = (unsigned long)arg8;                           \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
         "lwz 7,20(11)\n\t"                                       \
         "lwz 8,24(11)\n\t"                                       \
         "lwz 9,28(11)\n\t"                                       \
         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 9 args: arg9 is stored into the freshly opened 16-byte stack
   area at 8(r1); the rest go in r3..r10. */
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7,arg8,arg9)                              \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[10];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      _argvec[4] = (unsigned long)arg4;                           \
      _argvec[5] = (unsigned long)arg5;                           \
      _argvec[6] = (unsigned long)arg6;                           \
      _argvec[7] = (unsigned long)arg7;                           \
      _argvec[8] = (unsigned long)arg8;                           \
      _argvec[9] = (unsigned long)arg9;                           \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "addi 1,1,-16\n\t"                                       \
         /* arg9 */                                               \
         "lwz 3,36(11)\n\t"                                       \
         "stw 3,8(1)\n\t"                                         \
         /* args1-8 */                                            \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
         "lwz 7,20(11)\n\t"                                       \
         "lwz 8,24(11)\n\t"                                       \
         "lwz 9,28(11)\n\t"                                       \
         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 10 args: arg9 at 8(r1), arg10 at 12(r1). */
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
                      arg7,arg8,arg9,arg10)                       \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[11];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      _argvec[4] = (unsigned long)arg4;                           \
      _argvec[5] = (unsigned long)arg5;                           \
      _argvec[6] = (unsigned long)arg6;                           \
      _argvec[7] = (unsigned long)arg7;                           \
      _argvec[8] = (unsigned long)arg8;                           \
      _argvec[9] = (unsigned long)arg9;                           \
      _argvec[10] = (unsigned long)arg10;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "addi 1,1,-16\n\t"                                       \
         /* arg10 */                                              \
         "lwz 3,40(11)\n\t"                                       \
         "stw 3,12(1)\n\t"                                        \
         /* arg9 */                                               \
         "lwz 3,36(11)\n\t"                                       \
         "stw 3,8(1)\n\t"                                         \
         /* args1-8 */                                            \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
         "lwz 7,20(11)\n\t"                                       \
         "lwz 8,24(11)\n\t"                                       \
         "lwz 9,28(11)\n\t"                                       \
         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 11 args: needs a 32-byte stack area; arg9..arg11 stored at
   8(r1), 12(r1), 16(r1). */
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
                      arg7,arg8,arg9,arg10,arg11)                 \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[12];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      _argvec[4] = (unsigned long)arg4;                           \
      _argvec[5] = (unsigned long)arg5;                           \
      _argvec[6] = (unsigned long)arg6;                           \
      _argvec[7] = (unsigned long)arg7;                           \
      _argvec[8] = (unsigned long)arg8;                           \
      _argvec[9] = (unsigned long)arg9;                           \
      _argvec[10] = (unsigned long)arg10;                         \
      _argvec[11] = (unsigned long)arg11;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "addi 1,1,-32\n\t"                                       \
         /* arg11 */                                              \
         "lwz 3,44(11)\n\t"                                       \
         "stw 3,16(1)\n\t"                                        \
         /* arg10 */                                              \
         "lwz 3,40(11)\n\t"                                       \
         "stw 3,12(1)\n\t"                                        \
         /* arg9 */                                               \
         "lwz 3,36(11)\n\t"                                       \
         "stw 3,8(1)\n\t"                                         \
         /* args1-8 */                                            \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
         "lwz 7,20(11)\n\t"                                       \
         "lwz 8,24(11)\n\t"                                       \
         "lwz 9,28(11)\n\t"                                       \
         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 12 args: arg9..arg12 stored at 8(r1)..20(r1). */
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
                      arg7,arg8,arg9,arg10,arg11,arg12)           \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[13];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_orig.nraddr;                   \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      _argvec[3] = (unsigned long)arg3;                           \
      _argvec[4] = (unsigned long)arg4;                           \
      _argvec[5] = (unsigned long)arg5;                           \
      _argvec[6] = (unsigned long)arg6;                           \
      _argvec[7] = (unsigned long)arg7;                           \
      _argvec[8] = (unsigned long)arg8;                           \
      _argvec[9] = (unsigned long)arg9;                           \
      _argvec[10] = (unsigned long)arg10;                         \
      _argvec[11] = (unsigned long)arg11;                         \
      _argvec[12] = (unsigned long)arg12;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "addi 1,1,-32\n\t"                                       \
         /* arg12 */                                              \
         "lwz 3,48(11)\n\t"                                       \
         "stw 3,20(1)\n\t"                                        \
         /* arg11 */                                              \
         "lwz 3,44(11)\n\t"                                       \
         "stw 3,16(1)\n\t"                                        \
         /* arg10 */                                              \
         "lwz 3,40(11)\n\t"                                       \
         "stw 3,12(1)\n\t"                                        \
         /* arg9 */                                               \
         "lwz 3,36(11)\n\t"                                       \
         "stw 3,8(1)\n\t"                                         \
         /* args1-8 */                                            \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 5,12(11)\n\t"                                       \
         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
         "lwz 7,20(11)\n\t"                                       \
         "lwz 8,24(11)\n\t"                                       \
         "lwz 9,28(11)\n\t"                                       \
         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         VALGRIND_RESTORE_STACK                                   \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#endif /* PLAT_ppc32_linux */

/* ------------------------ ppc64-linux ------------------------ */

#if defined(PLAT_ppc64_linux)

/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS                                       \
   "lr", "ctr", "xer",                                            \
   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
   "r11", "r12", "r13"

/* Macros to save and align the stack before making a function
   call and restore it afterwards as gcc may not keep the stack
   pointer aligned if it doesn't realise calls are being made
   to other functions. */

/* r28 holds the caller's stack pointer across the call; rldicr
   clears the low 4 bits of r1 (16-byte alignment). */
#define VALGRIND_ALIGN_STACK       \
   "mr 28,1\n\t"                   \
   "rldicr 1,1,0,59\n\t"
#define VALGRIND_RESTORE_STACK     \
   "mr 1,28\n\t"

/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
   long) == 8. */

/* Call an original 0-arg function.  _argvec[1] holds the function's
   TOC pointer (r2): the caller's r2 is saved at -16(r11) and the
   callee's TOC is loaded from -8(r11) before the call, then the
   caller's r2 is restored afterwards. */
#define CALL_FN_W_v(lval, orig)                                   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3+0];                        \
      volatile unsigned long _res;                                \
      /* _argvec[0] holds current r2 across the call */           \
      _argvec[1] = (unsigned long)_orig.r2;                       \
      _argvec[2] = (unsigned long)_orig.nraddr;                   \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "std 2,-16(11)\n\t"  /* save tocptr */                   \
         "ld 2,-8(11)\n\t"    /* use nraddr's tocptr */           \
         "ld 11, 0(11)\n\t"   /* target->r11 */                   \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr 11,%1\n\t"                                           \
         "mr %0,3\n\t"                                            \
         "ld 2,-16(11)\n\t"   /* restore tocptr */                \
         VALGRIND_RESTORE_STACK                                   \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[2])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 1 arg: arg1 goes in r3, loaded from 8(r11) = _argvec[3]. */
#define CALL_FN_W_W(lval, orig, arg1)                             \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3+1];                        \
      volatile unsigned long _res;                                \
      /* _argvec[0] holds current r2 across the call */           \
      _argvec[1]   = (unsigned long)_orig.r2;                     \
      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
      _argvec[2+1] = (unsigned long)arg1;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "std 2,-16(11)\n\t"  /* save tocptr */                   \
         "ld 2,-8(11)\n\t"    /* use nraddr's tocptr */           \
         "ld 3, 8(11)\n\t"    /* arg1->r3 */                      \
         "ld 11, 0(11)\n\t"   /* target->r11 */                   \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr 11,%1\n\t"                                           \
         "mr %0,3\n\t"                                            \
         "ld 2,-16(11)\n\t"   /* restore tocptr */                \
         VALGRIND_RESTORE_STACK                                   \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[2])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 2 args: r3, r4. */
#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3+2];                        \
      volatile unsigned long _res;                                \
      /* _argvec[0] holds current r2 across the call */           \
      _argvec[1]   = (unsigned long)_orig.r2;                     \
      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
      _argvec[2+1] = (unsigned long)arg1;                         \
      _argvec[2+2] = (unsigned long)arg2;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "std 2,-16(11)\n\t"  /* save tocptr */                   \
         "ld 2,-8(11)\n\t"    /* use nraddr's tocptr */           \
         "ld 3, 8(11)\n\t"    /* arg1->r3 */                      \
         "ld 4, 16(11)\n\t"   /* arg2->r4 */                      \
         "ld 11, 0(11)\n\t"   /* target->r11 */                   \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr 11,%1\n\t"                                           \
         "mr %0,3\n\t"                                            \
         "ld 2,-16(11)\n\t"   /* restore tocptr */                \
         VALGRIND_RESTORE_STACK                                   \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[2])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 3 args: r3..r5. */
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3+3];                        \
      volatile unsigned long _res;                                \
      /* _argvec[0] holds current r2 across the call */           \
      _argvec[1]   = (unsigned long)_orig.r2;                     \
      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
      _argvec[2+1] = (unsigned long)arg1;                         \
      _argvec[2+2] = (unsigned long)arg2;                         \
      _argvec[2+3] = (unsigned long)arg3;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "std 2,-16(11)\n\t"  /* save tocptr */                   \
         "ld 2,-8(11)\n\t"    /* use nraddr's tocptr */           \
         "ld 3, 8(11)\n\t"    /* arg1->r3 */                      \
         "ld 4, 16(11)\n\t"   /* arg2->r4 */                      \
         "ld 5, 24(11)\n\t"   /* arg3->r5 */                      \
         "ld 11, 0(11)\n\t"   /* target->r11 */                   \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr 11,%1\n\t"                                           \
         "mr %0,3\n\t"                                            \
         "ld 2,-16(11)\n\t"   /* restore tocptr */                \
         VALGRIND_RESTORE_STACK                                   \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[2])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 4 args: r3..r6. */
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3+4];                        \
      volatile unsigned long _res;                                \
      /* _argvec[0] holds current r2 across the call */           \
      _argvec[1]   = (unsigned long)_orig.r2;                     \
      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
      _argvec[2+1] = (unsigned long)arg1;                         \
      _argvec[2+2] = (unsigned long)arg2;                         \
      _argvec[2+3] = (unsigned long)arg3;                         \
      _argvec[2+4] = (unsigned long)arg4;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "std 2,-16(11)\n\t"  /* save tocptr */                   \
         "ld 2,-8(11)\n\t"    /* use nraddr's tocptr */           \
         "ld 3, 8(11)\n\t"    /* arg1->r3 */                      \
         "ld 4, 16(11)\n\t"   /* arg2->r4 */                      \
         "ld 5, 24(11)\n\t"   /* arg3->r5 */                      \
         "ld 6, 32(11)\n\t"   /* arg4->r6 */                      \
         "ld 11, 0(11)\n\t"   /* target->r11 */                   \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr 11,%1\n\t"                                           \
         "mr %0,3\n\t"                                            \
         "ld 2,-16(11)\n\t"   /* restore tocptr */                \
         VALGRIND_RESTORE_STACK                                   \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[2])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 5 args: r3..r7. */
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3+5];                        \
      volatile unsigned long _res;                                \
      /* _argvec[0] holds current r2 across the call */           \
      _argvec[1]   = (unsigned long)_orig.r2;                     \
      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
      _argvec[2+1] = (unsigned long)arg1;                         \
      _argvec[2+2] = (unsigned long)arg2;                         \
      _argvec[2+3] = (unsigned long)arg3;                         \
      _argvec[2+4] = (unsigned long)arg4;                         \
      _argvec[2+5] = (unsigned long)arg5;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "std 2,-16(11)\n\t"  /* save tocptr */                   \
         "ld 2,-8(11)\n\t"    /* use nraddr's tocptr */           \
         "ld 3, 8(11)\n\t"    /* arg1->r3 */                      \
         "ld 4, 16(11)\n\t"   /* arg2->r4 */                      \
         "ld 5, 24(11)\n\t"   /* arg3->r5 */                      \
         "ld 6, 32(11)\n\t"   /* arg4->r6 */                      \
         "ld 7, 40(11)\n\t"   /* arg5->r7 */                      \
         "ld 11, 0(11)\n\t"   /* target->r11 */                   \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr 11,%1\n\t"                                           \
         "mr %0,3\n\t"                                            \
         "ld 2,-16(11)\n\t"   /* restore tocptr */                \
         VALGRIND_RESTORE_STACK                                   \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[2])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 6 args: r3..r8. */
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3+6];                        \
      volatile unsigned long _res;                                \
      /* _argvec[0] holds current r2 across the call */           \
      _argvec[1]   = (unsigned long)_orig.r2;                     \
      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
      _argvec[2+1] = (unsigned long)arg1;                         \
      _argvec[2+2] = (unsigned long)arg2;                         \
      _argvec[2+3] = (unsigned long)arg3;                         \
      _argvec[2+4] = (unsigned long)arg4;                         \
      _argvec[2+5] = (unsigned long)arg5;                         \
      _argvec[2+6] = (unsigned long)arg6;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "std 2,-16(11)\n\t"  /* save tocptr */                   \
         "ld 2,-8(11)\n\t"    /* use nraddr's tocptr */           \
         "ld 3, 8(11)\n\t"    /* arg1->r3 */                      \
         "ld 4, 16(11)\n\t"   /* arg2->r4 */                      \
         "ld 5, 24(11)\n\t"   /* arg3->r5 */                      \
         "ld 6, 32(11)\n\t"   /* arg4->r6 */                      \
         "ld 7, 40(11)\n\t"   /* arg5->r7 */                      \
         "ld 8, 48(11)\n\t"   /* arg6->r8 */                      \
         "ld 11, 0(11)\n\t"   /* target->r11 */                   \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr 11,%1\n\t"                                           \
         "mr %0,3\n\t"                                            \
         "ld 2,-16(11)\n\t"   /* restore tocptr */                \
         VALGRIND_RESTORE_STACK                                   \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[2])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 7 args: r3..r9. */
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7)                                        \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3+7];                        \
      volatile unsigned long _res;                                \
      /* _argvec[0] holds current r2 across the call */           \
      _argvec[1]   = (unsigned long)_orig.r2;                     \
      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
      _argvec[2+1] = (unsigned long)arg1;                         \
      _argvec[2+2] = (unsigned long)arg2;                         \
      _argvec[2+3] = (unsigned long)arg3;                         \
      _argvec[2+4] = (unsigned long)arg4;                         \
      _argvec[2+5] = (unsigned long)arg5;                         \
      _argvec[2+6] = (unsigned long)arg6;                         \
      _argvec[2+7] = (unsigned long)arg7;                         \
      __asm__ volatile(                                           \
         VALGRIND_ALIGN_STACK                                     \
         "mr 11,%1\n\t"                                           \
         "std 2,-16(11)\n\t"  /* save tocptr */                   \
         "ld 2,-8(11)\n\t"    /* use nraddr's tocptr */           \
         "ld 3, 8(11)\n\t"    /* arg1->r3 */                      \
         "ld 4, 16(11)\n\t"   /* arg2->r4 */                      \
         "ld 5, 24(11)\n\t"   /* arg3->r5 */                      \
         "ld 6, 32(11)\n\t"   /* arg4->r6 */                      \
         "ld 7, 40(11)\n\t"   /* arg5->r7 */                      \
         "ld 8, 48(11)\n\t"   /* arg6->r8 */                      \
         "ld 9, 56(11)\n\t"   /* arg7->r9 */                      \
         "ld 11, 0(11)\n\t"   /* target->r11 */                   \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr 11,%1\n\t"                                           \
         "mr %0,3\n\t"                                            \
         "ld 2,-16(11)\n\t"   /* restore tocptr */                \
         VALGRIND_RESTORE_STACK                                   \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[2])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28"   \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

/* 8 args: r3..r10. */
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
                     arg7,arg8)                                   \
   do {                                                           \
      volatile OrigFn        _orig = (orig);                      \
      volatile unsigned long _argvec[3+8];                        \
      volatile unsigned long _res;                                \
      /* _argvec[0] holds current r2 across the call */           \
_argvec[1] = (unsigned long)_orig.r2; \ 2616 _argvec[2] = (unsigned long)_orig.nraddr; \ 2617 _argvec[2+1] = (unsigned long)arg1; \ 2618 _argvec[2+2] = (unsigned long)arg2; \ 2619 _argvec[2+3] = (unsigned long)arg3; \ 2620 _argvec[2+4] = (unsigned long)arg4; \ 2621 _argvec[2+5] = (unsigned long)arg5; \ 2622 _argvec[2+6] = (unsigned long)arg6; \ 2623 _argvec[2+7] = (unsigned long)arg7; \ 2624 _argvec[2+8] = (unsigned long)arg8; \ 2625 __asm__ volatile( \ 2626 VALGRIND_ALIGN_STACK \ 2627 "mr 11,%1\n\t" \ 2628 "std 2,-16(11)\n\t" /* save tocptr */ \ 2629 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ 2630 "ld 3, 8(11)\n\t" /* arg1->r3 */ \ 2631 "ld 4, 16(11)\n\t" /* arg2->r4 */ \ 2632 "ld 5, 24(11)\n\t" /* arg3->r5 */ \ 2633 "ld 6, 32(11)\n\t" /* arg4->r6 */ \ 2634 "ld 7, 40(11)\n\t" /* arg5->r7 */ \ 2635 "ld 8, 48(11)\n\t" /* arg6->r8 */ \ 2636 "ld 9, 56(11)\n\t" /* arg7->r9 */ \ 2637 "ld 10, 64(11)\n\t" /* arg8->r10 */ \ 2638 "ld 11, 0(11)\n\t" /* target->r11 */ \ 2639 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ 2640 "mr 11,%1\n\t" \ 2641 "mr %0,3\n\t" \ 2642 "ld 2,-16(11)\n\t" /* restore tocptr */ \ 2643 VALGRIND_RESTORE_STACK \ 2644 : /*out*/ "=r" (_res) \ 2645 : /*in*/ "r" (&_argvec[2]) \ 2646 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ 2647 ); \ 2648 lval = (__typeof__(lval)) _res; \ 2649 } while (0) 2650 2651#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 2652 arg7,arg8,arg9) \ 2653 do { \ 2654 volatile OrigFn _orig = (orig); \ 2655 volatile unsigned long _argvec[3+9]; \ 2656 volatile unsigned long _res; \ 2657 /* _argvec[0] holds current r2 across the call */ \ 2658 _argvec[1] = (unsigned long)_orig.r2; \ 2659 _argvec[2] = (unsigned long)_orig.nraddr; \ 2660 _argvec[2+1] = (unsigned long)arg1; \ 2661 _argvec[2+2] = (unsigned long)arg2; \ 2662 _argvec[2+3] = (unsigned long)arg3; \ 2663 _argvec[2+4] = (unsigned long)arg4; \ 2664 _argvec[2+5] = (unsigned long)arg5; \ 2665 _argvec[2+6] = (unsigned long)arg6; \ 2666 _argvec[2+7] = 
(unsigned long)arg7; \ 2667 _argvec[2+8] = (unsigned long)arg8; \ 2668 _argvec[2+9] = (unsigned long)arg9; \ 2669 __asm__ volatile( \ 2670 VALGRIND_ALIGN_STACK \ 2671 "mr 11,%1\n\t" \ 2672 "std 2,-16(11)\n\t" /* save tocptr */ \ 2673 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ 2674 "addi 1,1,-128\n\t" /* expand stack frame */ \ 2675 /* arg9 */ \ 2676 "ld 3,72(11)\n\t" \ 2677 "std 3,112(1)\n\t" \ 2678 /* args1-8 */ \ 2679 "ld 3, 8(11)\n\t" /* arg1->r3 */ \ 2680 "ld 4, 16(11)\n\t" /* arg2->r4 */ \ 2681 "ld 5, 24(11)\n\t" /* arg3->r5 */ \ 2682 "ld 6, 32(11)\n\t" /* arg4->r6 */ \ 2683 "ld 7, 40(11)\n\t" /* arg5->r7 */ \ 2684 "ld 8, 48(11)\n\t" /* arg6->r8 */ \ 2685 "ld 9, 56(11)\n\t" /* arg7->r9 */ \ 2686 "ld 10, 64(11)\n\t" /* arg8->r10 */ \ 2687 "ld 11, 0(11)\n\t" /* target->r11 */ \ 2688 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ 2689 "mr 11,%1\n\t" \ 2690 "mr %0,3\n\t" \ 2691 "ld 2,-16(11)\n\t" /* restore tocptr */ \ 2692 VALGRIND_RESTORE_STACK \ 2693 : /*out*/ "=r" (_res) \ 2694 : /*in*/ "r" (&_argvec[2]) \ 2695 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ 2696 ); \ 2697 lval = (__typeof__(lval)) _res; \ 2698 } while (0) 2699 2700#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 2701 arg7,arg8,arg9,arg10) \ 2702 do { \ 2703 volatile OrigFn _orig = (orig); \ 2704 volatile unsigned long _argvec[3+10]; \ 2705 volatile unsigned long _res; \ 2706 /* _argvec[0] holds current r2 across the call */ \ 2707 _argvec[1] = (unsigned long)_orig.r2; \ 2708 _argvec[2] = (unsigned long)_orig.nraddr; \ 2709 _argvec[2+1] = (unsigned long)arg1; \ 2710 _argvec[2+2] = (unsigned long)arg2; \ 2711 _argvec[2+3] = (unsigned long)arg3; \ 2712 _argvec[2+4] = (unsigned long)arg4; \ 2713 _argvec[2+5] = (unsigned long)arg5; \ 2714 _argvec[2+6] = (unsigned long)arg6; \ 2715 _argvec[2+7] = (unsigned long)arg7; \ 2716 _argvec[2+8] = (unsigned long)arg8; \ 2717 _argvec[2+9] = (unsigned long)arg9; \ 2718 _argvec[2+10] = (unsigned long)arg10; \ 2719 __asm__ volatile( \ 
2720 VALGRIND_ALIGN_STACK \ 2721 "mr 11,%1\n\t" \ 2722 "std 2,-16(11)\n\t" /* save tocptr */ \ 2723 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ 2724 "addi 1,1,-128\n\t" /* expand stack frame */ \ 2725 /* arg10 */ \ 2726 "ld 3,80(11)\n\t" \ 2727 "std 3,120(1)\n\t" \ 2728 /* arg9 */ \ 2729 "ld 3,72(11)\n\t" \ 2730 "std 3,112(1)\n\t" \ 2731 /* args1-8 */ \ 2732 "ld 3, 8(11)\n\t" /* arg1->r3 */ \ 2733 "ld 4, 16(11)\n\t" /* arg2->r4 */ \ 2734 "ld 5, 24(11)\n\t" /* arg3->r5 */ \ 2735 "ld 6, 32(11)\n\t" /* arg4->r6 */ \ 2736 "ld 7, 40(11)\n\t" /* arg5->r7 */ \ 2737 "ld 8, 48(11)\n\t" /* arg6->r8 */ \ 2738 "ld 9, 56(11)\n\t" /* arg7->r9 */ \ 2739 "ld 10, 64(11)\n\t" /* arg8->r10 */ \ 2740 "ld 11, 0(11)\n\t" /* target->r11 */ \ 2741 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ 2742 "mr 11,%1\n\t" \ 2743 "mr %0,3\n\t" \ 2744 "ld 2,-16(11)\n\t" /* restore tocptr */ \ 2745 VALGRIND_RESTORE_STACK \ 2746 : /*out*/ "=r" (_res) \ 2747 : /*in*/ "r" (&_argvec[2]) \ 2748 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ 2749 ); \ 2750 lval = (__typeof__(lval)) _res; \ 2751 } while (0) 2752 2753#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 2754 arg7,arg8,arg9,arg10,arg11) \ 2755 do { \ 2756 volatile OrigFn _orig = (orig); \ 2757 volatile unsigned long _argvec[3+11]; \ 2758 volatile unsigned long _res; \ 2759 /* _argvec[0] holds current r2 across the call */ \ 2760 _argvec[1] = (unsigned long)_orig.r2; \ 2761 _argvec[2] = (unsigned long)_orig.nraddr; \ 2762 _argvec[2+1] = (unsigned long)arg1; \ 2763 _argvec[2+2] = (unsigned long)arg2; \ 2764 _argvec[2+3] = (unsigned long)arg3; \ 2765 _argvec[2+4] = (unsigned long)arg4; \ 2766 _argvec[2+5] = (unsigned long)arg5; \ 2767 _argvec[2+6] = (unsigned long)arg6; \ 2768 _argvec[2+7] = (unsigned long)arg7; \ 2769 _argvec[2+8] = (unsigned long)arg8; \ 2770 _argvec[2+9] = (unsigned long)arg9; \ 2771 _argvec[2+10] = (unsigned long)arg10; \ 2772 _argvec[2+11] = (unsigned long)arg11; \ 2773 __asm__ volatile( \ 2774 
VALGRIND_ALIGN_STACK \ 2775 "mr 11,%1\n\t" \ 2776 "std 2,-16(11)\n\t" /* save tocptr */ \ 2777 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ 2778 "addi 1,1,-144\n\t" /* expand stack frame */ \ 2779 /* arg11 */ \ 2780 "ld 3,88(11)\n\t" \ 2781 "std 3,128(1)\n\t" \ 2782 /* arg10 */ \ 2783 "ld 3,80(11)\n\t" \ 2784 "std 3,120(1)\n\t" \ 2785 /* arg9 */ \ 2786 "ld 3,72(11)\n\t" \ 2787 "std 3,112(1)\n\t" \ 2788 /* args1-8 */ \ 2789 "ld 3, 8(11)\n\t" /* arg1->r3 */ \ 2790 "ld 4, 16(11)\n\t" /* arg2->r4 */ \ 2791 "ld 5, 24(11)\n\t" /* arg3->r5 */ \ 2792 "ld 6, 32(11)\n\t" /* arg4->r6 */ \ 2793 "ld 7, 40(11)\n\t" /* arg5->r7 */ \ 2794 "ld 8, 48(11)\n\t" /* arg6->r8 */ \ 2795 "ld 9, 56(11)\n\t" /* arg7->r9 */ \ 2796 "ld 10, 64(11)\n\t" /* arg8->r10 */ \ 2797 "ld 11, 0(11)\n\t" /* target->r11 */ \ 2798 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ 2799 "mr 11,%1\n\t" \ 2800 "mr %0,3\n\t" \ 2801 "ld 2,-16(11)\n\t" /* restore tocptr */ \ 2802 VALGRIND_RESTORE_STACK \ 2803 : /*out*/ "=r" (_res) \ 2804 : /*in*/ "r" (&_argvec[2]) \ 2805 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ 2806 ); \ 2807 lval = (__typeof__(lval)) _res; \ 2808 } while (0) 2809 2810#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 2811 arg7,arg8,arg9,arg10,arg11,arg12) \ 2812 do { \ 2813 volatile OrigFn _orig = (orig); \ 2814 volatile unsigned long _argvec[3+12]; \ 2815 volatile unsigned long _res; \ 2816 /* _argvec[0] holds current r2 across the call */ \ 2817 _argvec[1] = (unsigned long)_orig.r2; \ 2818 _argvec[2] = (unsigned long)_orig.nraddr; \ 2819 _argvec[2+1] = (unsigned long)arg1; \ 2820 _argvec[2+2] = (unsigned long)arg2; \ 2821 _argvec[2+3] = (unsigned long)arg3; \ 2822 _argvec[2+4] = (unsigned long)arg4; \ 2823 _argvec[2+5] = (unsigned long)arg5; \ 2824 _argvec[2+6] = (unsigned long)arg6; \ 2825 _argvec[2+7] = (unsigned long)arg7; \ 2826 _argvec[2+8] = (unsigned long)arg8; \ 2827 _argvec[2+9] = (unsigned long)arg9; \ 2828 _argvec[2+10] = (unsigned long)arg10; \ 2829 
_argvec[2+11] = (unsigned long)arg11; \ 2830 _argvec[2+12] = (unsigned long)arg12; \ 2831 __asm__ volatile( \ 2832 VALGRIND_ALIGN_STACK \ 2833 "mr 11,%1\n\t" \ 2834 "std 2,-16(11)\n\t" /* save tocptr */ \ 2835 "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ 2836 "addi 1,1,-144\n\t" /* expand stack frame */ \ 2837 /* arg12 */ \ 2838 "ld 3,96(11)\n\t" \ 2839 "std 3,136(1)\n\t" \ 2840 /* arg11 */ \ 2841 "ld 3,88(11)\n\t" \ 2842 "std 3,128(1)\n\t" \ 2843 /* arg10 */ \ 2844 "ld 3,80(11)\n\t" \ 2845 "std 3,120(1)\n\t" \ 2846 /* arg9 */ \ 2847 "ld 3,72(11)\n\t" \ 2848 "std 3,112(1)\n\t" \ 2849 /* args1-8 */ \ 2850 "ld 3, 8(11)\n\t" /* arg1->r3 */ \ 2851 "ld 4, 16(11)\n\t" /* arg2->r4 */ \ 2852 "ld 5, 24(11)\n\t" /* arg3->r5 */ \ 2853 "ld 6, 32(11)\n\t" /* arg4->r6 */ \ 2854 "ld 7, 40(11)\n\t" /* arg5->r7 */ \ 2855 "ld 8, 48(11)\n\t" /* arg6->r8 */ \ 2856 "ld 9, 56(11)\n\t" /* arg7->r9 */ \ 2857 "ld 10, 64(11)\n\t" /* arg8->r10 */ \ 2858 "ld 11, 0(11)\n\t" /* target->r11 */ \ 2859 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ 2860 "mr 11,%1\n\t" \ 2861 "mr %0,3\n\t" \ 2862 "ld 2,-16(11)\n\t" /* restore tocptr */ \ 2863 VALGRIND_RESTORE_STACK \ 2864 : /*out*/ "=r" (_res) \ 2865 : /*in*/ "r" (&_argvec[2]) \ 2866 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ 2867 ); \ 2868 lval = (__typeof__(lval)) _res; \ 2869 } while (0) 2870 2871#endif /* PLAT_ppc64_linux */ 2872 2873/* ------------------------- arm-linux ------------------------- */ 2874 2875#if defined(PLAT_arm_linux) 2876 2877/* These regs are trashed by the hidden call. */ 2878#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14" 2879 2880/* Macros to save and align the stack before making a function 2881 call and restore it afterwards as gcc may not keep the stack 2882 pointer aligned if it doesn't realise calls are being made 2883 to other functions. 
*/ 2884 2885#define VALGRIND_ALIGN_STACK \ 2886 "mov r11, sp\n\t" \ 2887 "bic sp, sp, #7\n\t" 2888#define VALGRIND_RESTORE_STACK \ 2889 "mov sp, r11\n\t" 2890 2891/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned 2892 long) == 4. */ 2893 2894#define CALL_FN_W_v(lval, orig) \ 2895 do { \ 2896 volatile OrigFn _orig = (orig); \ 2897 volatile unsigned long _argvec[1]; \ 2898 volatile unsigned long _res; \ 2899 _argvec[0] = (unsigned long)_orig.nraddr; \ 2900 __asm__ volatile( \ 2901 VALGRIND_ALIGN_STACK \ 2902 "ldr r4, [%1] \n\t" /* target->r4 */ \ 2903 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 2904 VALGRIND_RESTORE_STACK \ 2905 "mov %0, r0\n" \ 2906 : /*out*/ "=r" (_res) \ 2907 : /*in*/ "0" (&_argvec[0]) \ 2908 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 2909 ); \ 2910 lval = (__typeof__(lval)) _res; \ 2911 } while (0) 2912 2913#define CALL_FN_W_W(lval, orig, arg1) \ 2914 do { \ 2915 volatile OrigFn _orig = (orig); \ 2916 volatile unsigned long _argvec[2]; \ 2917 volatile unsigned long _res; \ 2918 _argvec[0] = (unsigned long)_orig.nraddr; \ 2919 _argvec[1] = (unsigned long)(arg1); \ 2920 __asm__ volatile( \ 2921 VALGRIND_ALIGN_STACK \ 2922 "ldr r0, [%1, #4] \n\t" \ 2923 "ldr r4, [%1] \n\t" /* target->r4 */ \ 2924 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 2925 VALGRIND_RESTORE_STACK \ 2926 "mov %0, r0\n" \ 2927 : /*out*/ "=r" (_res) \ 2928 : /*in*/ "0" (&_argvec[0]) \ 2929 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 2930 ); \ 2931 lval = (__typeof__(lval)) _res; \ 2932 } while (0) 2933 2934#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ 2935 do { \ 2936 volatile OrigFn _orig = (orig); \ 2937 volatile unsigned long _argvec[3]; \ 2938 volatile unsigned long _res; \ 2939 _argvec[0] = (unsigned long)_orig.nraddr; \ 2940 _argvec[1] = (unsigned long)(arg1); \ 2941 _argvec[2] = (unsigned long)(arg2); \ 2942 __asm__ volatile( \ 2943 VALGRIND_ALIGN_STACK \ 2944 "ldr r0, [%1, #4] \n\t" \ 2945 "ldr r1, [%1, #8] \n\t" \ 2946 "ldr r4, [%1] 
\n\t" /* target->r4 */ \ 2947 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 2948 VALGRIND_RESTORE_STACK \ 2949 "mov %0, r0\n" \ 2950 : /*out*/ "=r" (_res) \ 2951 : /*in*/ "0" (&_argvec[0]) \ 2952 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 2953 ); \ 2954 lval = (__typeof__(lval)) _res; \ 2955 } while (0) 2956 2957#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ 2958 do { \ 2959 volatile OrigFn _orig = (orig); \ 2960 volatile unsigned long _argvec[4]; \ 2961 volatile unsigned long _res; \ 2962 _argvec[0] = (unsigned long)_orig.nraddr; \ 2963 _argvec[1] = (unsigned long)(arg1); \ 2964 _argvec[2] = (unsigned long)(arg2); \ 2965 _argvec[3] = (unsigned long)(arg3); \ 2966 __asm__ volatile( \ 2967 VALGRIND_ALIGN_STACK \ 2968 "ldr r0, [%1, #4] \n\t" \ 2969 "ldr r1, [%1, #8] \n\t" \ 2970 "ldr r2, [%1, #12] \n\t" \ 2971 "ldr r4, [%1] \n\t" /* target->r4 */ \ 2972 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 2973 VALGRIND_RESTORE_STACK \ 2974 "mov %0, r0\n" \ 2975 : /*out*/ "=r" (_res) \ 2976 : /*in*/ "0" (&_argvec[0]) \ 2977 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 2978 ); \ 2979 lval = (__typeof__(lval)) _res; \ 2980 } while (0) 2981 2982#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ 2983 do { \ 2984 volatile OrigFn _orig = (orig); \ 2985 volatile unsigned long _argvec[5]; \ 2986 volatile unsigned long _res; \ 2987 _argvec[0] = (unsigned long)_orig.nraddr; \ 2988 _argvec[1] = (unsigned long)(arg1); \ 2989 _argvec[2] = (unsigned long)(arg2); \ 2990 _argvec[3] = (unsigned long)(arg3); \ 2991 _argvec[4] = (unsigned long)(arg4); \ 2992 __asm__ volatile( \ 2993 VALGRIND_ALIGN_STACK \ 2994 "ldr r0, [%1, #4] \n\t" \ 2995 "ldr r1, [%1, #8] \n\t" \ 2996 "ldr r2, [%1, #12] \n\t" \ 2997 "ldr r3, [%1, #16] \n\t" \ 2998 "ldr r4, [%1] \n\t" /* target->r4 */ \ 2999 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 3000 VALGRIND_RESTORE_STACK \ 3001 "mov %0, r0" \ 3002 : /*out*/ "=r" (_res) \ 3003 : /*in*/ "0" (&_argvec[0]) \ 3004 : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS, "r11" \ 3005 ); \ 3006 lval = (__typeof__(lval)) _res; \ 3007 } while (0) 3008 3009#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ 3010 do { \ 3011 volatile OrigFn _orig = (orig); \ 3012 volatile unsigned long _argvec[6]; \ 3013 volatile unsigned long _res; \ 3014 _argvec[0] = (unsigned long)_orig.nraddr; \ 3015 _argvec[1] = (unsigned long)(arg1); \ 3016 _argvec[2] = (unsigned long)(arg2); \ 3017 _argvec[3] = (unsigned long)(arg3); \ 3018 _argvec[4] = (unsigned long)(arg4); \ 3019 _argvec[5] = (unsigned long)(arg5); \ 3020 __asm__ volatile( \ 3021 VALGRIND_ALIGN_STACK \ 3022 "sub sp, sp, #4 \n\t" \ 3023 "ldr r0, [%1, #20] \n\t" \ 3024 "push {r0} \n\t" \ 3025 "ldr r0, [%1, #4] \n\t" \ 3026 "ldr r1, [%1, #8] \n\t" \ 3027 "ldr r2, [%1, #12] \n\t" \ 3028 "ldr r3, [%1, #16] \n\t" \ 3029 "ldr r4, [%1] \n\t" /* target->r4 */ \ 3030 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 3031 VALGRIND_RESTORE_STACK \ 3032 "mov %0, r0" \ 3033 : /*out*/ "=r" (_res) \ 3034 : /*in*/ "0" (&_argvec[0]) \ 3035 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 3036 ); \ 3037 lval = (__typeof__(lval)) _res; \ 3038 } while (0) 3039 3040#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ 3041 do { \ 3042 volatile OrigFn _orig = (orig); \ 3043 volatile unsigned long _argvec[7]; \ 3044 volatile unsigned long _res; \ 3045 _argvec[0] = (unsigned long)_orig.nraddr; \ 3046 _argvec[1] = (unsigned long)(arg1); \ 3047 _argvec[2] = (unsigned long)(arg2); \ 3048 _argvec[3] = (unsigned long)(arg3); \ 3049 _argvec[4] = (unsigned long)(arg4); \ 3050 _argvec[5] = (unsigned long)(arg5); \ 3051 _argvec[6] = (unsigned long)(arg6); \ 3052 __asm__ volatile( \ 3053 VALGRIND_ALIGN_STACK \ 3054 "ldr r0, [%1, #20] \n\t" \ 3055 "ldr r1, [%1, #24] \n\t" \ 3056 "push {r0, r1} \n\t" \ 3057 "ldr r0, [%1, #4] \n\t" \ 3058 "ldr r1, [%1, #8] \n\t" \ 3059 "ldr r2, [%1, #12] \n\t" \ 3060 "ldr r3, [%1, #16] \n\t" \ 3061 "ldr r4, [%1] \n\t" /* target->r4 */ \ 3062 
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 3063 VALGRIND_RESTORE_STACK \ 3064 "mov %0, r0" \ 3065 : /*out*/ "=r" (_res) \ 3066 : /*in*/ "0" (&_argvec[0]) \ 3067 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 3068 ); \ 3069 lval = (__typeof__(lval)) _res; \ 3070 } while (0) 3071 3072#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 3073 arg7) \ 3074 do { \ 3075 volatile OrigFn _orig = (orig); \ 3076 volatile unsigned long _argvec[8]; \ 3077 volatile unsigned long _res; \ 3078 _argvec[0] = (unsigned long)_orig.nraddr; \ 3079 _argvec[1] = (unsigned long)(arg1); \ 3080 _argvec[2] = (unsigned long)(arg2); \ 3081 _argvec[3] = (unsigned long)(arg3); \ 3082 _argvec[4] = (unsigned long)(arg4); \ 3083 _argvec[5] = (unsigned long)(arg5); \ 3084 _argvec[6] = (unsigned long)(arg6); \ 3085 _argvec[7] = (unsigned long)(arg7); \ 3086 __asm__ volatile( \ 3087 VALGRIND_ALIGN_STACK \ 3088 "sub sp, sp, #4 \n\t" \ 3089 "ldr r0, [%1, #20] \n\t" \ 3090 "ldr r1, [%1, #24] \n\t" \ 3091 "ldr r2, [%1, #28] \n\t" \ 3092 "push {r0, r1, r2} \n\t" \ 3093 "ldr r0, [%1, #4] \n\t" \ 3094 "ldr r1, [%1, #8] \n\t" \ 3095 "ldr r2, [%1, #12] \n\t" \ 3096 "ldr r3, [%1, #16] \n\t" \ 3097 "ldr r4, [%1] \n\t" /* target->r4 */ \ 3098 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 3099 VALGRIND_RESTORE_STACK \ 3100 "mov %0, r0" \ 3101 : /*out*/ "=r" (_res) \ 3102 : /*in*/ "0" (&_argvec[0]) \ 3103 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 3104 ); \ 3105 lval = (__typeof__(lval)) _res; \ 3106 } while (0) 3107 3108#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 3109 arg7,arg8) \ 3110 do { \ 3111 volatile OrigFn _orig = (orig); \ 3112 volatile unsigned long _argvec[9]; \ 3113 volatile unsigned long _res; \ 3114 _argvec[0] = (unsigned long)_orig.nraddr; \ 3115 _argvec[1] = (unsigned long)(arg1); \ 3116 _argvec[2] = (unsigned long)(arg2); \ 3117 _argvec[3] = (unsigned long)(arg3); \ 3118 _argvec[4] = (unsigned long)(arg4); \ 3119 _argvec[5] = (unsigned long)(arg5); 
\ 3120 _argvec[6] = (unsigned long)(arg6); \ 3121 _argvec[7] = (unsigned long)(arg7); \ 3122 _argvec[8] = (unsigned long)(arg8); \ 3123 __asm__ volatile( \ 3124 VALGRIND_ALIGN_STACK \ 3125 "ldr r0, [%1, #20] \n\t" \ 3126 "ldr r1, [%1, #24] \n\t" \ 3127 "ldr r2, [%1, #28] \n\t" \ 3128 "ldr r3, [%1, #32] \n\t" \ 3129 "push {r0, r1, r2, r3} \n\t" \ 3130 "ldr r0, [%1, #4] \n\t" \ 3131 "ldr r1, [%1, #8] \n\t" \ 3132 "ldr r2, [%1, #12] \n\t" \ 3133 "ldr r3, [%1, #16] \n\t" \ 3134 "ldr r4, [%1] \n\t" /* target->r4 */ \ 3135 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 3136 VALGRIND_RESTORE_STACK \ 3137 "mov %0, r0" \ 3138 : /*out*/ "=r" (_res) \ 3139 : /*in*/ "0" (&_argvec[0]) \ 3140 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 3141 ); \ 3142 lval = (__typeof__(lval)) _res; \ 3143 } while (0) 3144 3145#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 3146 arg7,arg8,arg9) \ 3147 do { \ 3148 volatile OrigFn _orig = (orig); \ 3149 volatile unsigned long _argvec[10]; \ 3150 volatile unsigned long _res; \ 3151 _argvec[0] = (unsigned long)_orig.nraddr; \ 3152 _argvec[1] = (unsigned long)(arg1); \ 3153 _argvec[2] = (unsigned long)(arg2); \ 3154 _argvec[3] = (unsigned long)(arg3); \ 3155 _argvec[4] = (unsigned long)(arg4); \ 3156 _argvec[5] = (unsigned long)(arg5); \ 3157 _argvec[6] = (unsigned long)(arg6); \ 3158 _argvec[7] = (unsigned long)(arg7); \ 3159 _argvec[8] = (unsigned long)(arg8); \ 3160 _argvec[9] = (unsigned long)(arg9); \ 3161 __asm__ volatile( \ 3162 VALGRIND_ALIGN_STACK \ 3163 "sub sp, sp, #4 \n\t" \ 3164 "ldr r0, [%1, #20] \n\t" \ 3165 "ldr r1, [%1, #24] \n\t" \ 3166 "ldr r2, [%1, #28] \n\t" \ 3167 "ldr r3, [%1, #32] \n\t" \ 3168 "ldr r4, [%1, #36] \n\t" \ 3169 "push {r0, r1, r2, r3, r4} \n\t" \ 3170 "ldr r0, [%1, #4] \n\t" \ 3171 "ldr r1, [%1, #8] \n\t" \ 3172 "ldr r2, [%1, #12] \n\t" \ 3173 "ldr r3, [%1, #16] \n\t" \ 3174 "ldr r4, [%1] \n\t" /* target->r4 */ \ 3175 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 3176 VALGRIND_RESTORE_STACK \ 
3177 "mov %0, r0" \ 3178 : /*out*/ "=r" (_res) \ 3179 : /*in*/ "0" (&_argvec[0]) \ 3180 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 3181 ); \ 3182 lval = (__typeof__(lval)) _res; \ 3183 } while (0) 3184 3185#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 3186 arg7,arg8,arg9,arg10) \ 3187 do { \ 3188 volatile OrigFn _orig = (orig); \ 3189 volatile unsigned long _argvec[11]; \ 3190 volatile unsigned long _res; \ 3191 _argvec[0] = (unsigned long)_orig.nraddr; \ 3192 _argvec[1] = (unsigned long)(arg1); \ 3193 _argvec[2] = (unsigned long)(arg2); \ 3194 _argvec[3] = (unsigned long)(arg3); \ 3195 _argvec[4] = (unsigned long)(arg4); \ 3196 _argvec[5] = (unsigned long)(arg5); \ 3197 _argvec[6] = (unsigned long)(arg6); \ 3198 _argvec[7] = (unsigned long)(arg7); \ 3199 _argvec[8] = (unsigned long)(arg8); \ 3200 _argvec[9] = (unsigned long)(arg9); \ 3201 _argvec[10] = (unsigned long)(arg10); \ 3202 __asm__ volatile( \ 3203 VALGRIND_ALIGN_STACK \ 3204 "ldr r0, [%1, #40] \n\t" \ 3205 "push {r0} \n\t" \ 3206 "ldr r0, [%1, #20] \n\t" \ 3207 "ldr r1, [%1, #24] \n\t" \ 3208 "ldr r2, [%1, #28] \n\t" \ 3209 "ldr r3, [%1, #32] \n\t" \ 3210 "ldr r4, [%1, #36] \n\t" \ 3211 "push {r0, r1, r2, r3, r4} \n\t" \ 3212 "ldr r0, [%1, #4] \n\t" \ 3213 "ldr r1, [%1, #8] \n\t" \ 3214 "ldr r2, [%1, #12] \n\t" \ 3215 "ldr r3, [%1, #16] \n\t" \ 3216 "ldr r4, [%1] \n\t" /* target->r4 */ \ 3217 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 3218 VALGRIND_RESTORE_STACK \ 3219 "mov %0, r0" \ 3220 : /*out*/ "=r" (_res) \ 3221 : /*in*/ "0" (&_argvec[0]) \ 3222 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 3223 ); \ 3224 lval = (__typeof__(lval)) _res; \ 3225 } while (0) 3226 3227#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ 3228 arg6,arg7,arg8,arg9,arg10, \ 3229 arg11) \ 3230 do { \ 3231 volatile OrigFn _orig = (orig); \ 3232 volatile unsigned long _argvec[12]; \ 3233 volatile unsigned long _res; \ 3234 _argvec[0] = (unsigned long)_orig.nraddr; \ 3235 
_argvec[1] = (unsigned long)(arg1); \ 3236 _argvec[2] = (unsigned long)(arg2); \ 3237 _argvec[3] = (unsigned long)(arg3); \ 3238 _argvec[4] = (unsigned long)(arg4); \ 3239 _argvec[5] = (unsigned long)(arg5); \ 3240 _argvec[6] = (unsigned long)(arg6); \ 3241 _argvec[7] = (unsigned long)(arg7); \ 3242 _argvec[8] = (unsigned long)(arg8); \ 3243 _argvec[9] = (unsigned long)(arg9); \ 3244 _argvec[10] = (unsigned long)(arg10); \ 3245 _argvec[11] = (unsigned long)(arg11); \ 3246 __asm__ volatile( \ 3247 VALGRIND_ALIGN_STACK \ 3248 "sub sp, sp, #4 \n\t" \ 3249 "ldr r0, [%1, #40] \n\t" \ 3250 "ldr r1, [%1, #44] \n\t" \ 3251 "push {r0, r1} \n\t" \ 3252 "ldr r0, [%1, #20] \n\t" \ 3253 "ldr r1, [%1, #24] \n\t" \ 3254 "ldr r2, [%1, #28] \n\t" \ 3255 "ldr r3, [%1, #32] \n\t" \ 3256 "ldr r4, [%1, #36] \n\t" \ 3257 "push {r0, r1, r2, r3, r4} \n\t" \ 3258 "ldr r0, [%1, #4] \n\t" \ 3259 "ldr r1, [%1, #8] \n\t" \ 3260 "ldr r2, [%1, #12] \n\t" \ 3261 "ldr r3, [%1, #16] \n\t" \ 3262 "ldr r4, [%1] \n\t" /* target->r4 */ \ 3263 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 3264 VALGRIND_RESTORE_STACK \ 3265 "mov %0, r0" \ 3266 : /*out*/ "=r" (_res) \ 3267 : /*in*/ "0" (&_argvec[0]) \ 3268 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 3269 ); \ 3270 lval = (__typeof__(lval)) _res; \ 3271 } while (0) 3272 3273#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ 3274 arg6,arg7,arg8,arg9,arg10, \ 3275 arg11,arg12) \ 3276 do { \ 3277 volatile OrigFn _orig = (orig); \ 3278 volatile unsigned long _argvec[13]; \ 3279 volatile unsigned long _res; \ 3280 _argvec[0] = (unsigned long)_orig.nraddr; \ 3281 _argvec[1] = (unsigned long)(arg1); \ 3282 _argvec[2] = (unsigned long)(arg2); \ 3283 _argvec[3] = (unsigned long)(arg3); \ 3284 _argvec[4] = (unsigned long)(arg4); \ 3285 _argvec[5] = (unsigned long)(arg5); \ 3286 _argvec[6] = (unsigned long)(arg6); \ 3287 _argvec[7] = (unsigned long)(arg7); \ 3288 _argvec[8] = (unsigned long)(arg8); \ 3289 _argvec[9] = (unsigned long)(arg9); \ 3290 
_argvec[10] = (unsigned long)(arg10); \ 3291 _argvec[11] = (unsigned long)(arg11); \ 3292 _argvec[12] = (unsigned long)(arg12); \ 3293 __asm__ volatile( \ 3294 VALGRIND_ALIGN_STACK \ 3295 "ldr r0, [%1, #40] \n\t" \ 3296 "ldr r1, [%1, #44] \n\t" \ 3297 "ldr r2, [%1, #48] \n\t" \ 3298 "push {r0, r1, r2} \n\t" \ 3299 "ldr r0, [%1, #20] \n\t" \ 3300 "ldr r1, [%1, #24] \n\t" \ 3301 "ldr r2, [%1, #28] \n\t" \ 3302 "ldr r3, [%1, #32] \n\t" \ 3303 "ldr r4, [%1, #36] \n\t" \ 3304 "push {r0, r1, r2, r3, r4} \n\t" \ 3305 "ldr r0, [%1, #4] \n\t" \ 3306 "ldr r1, [%1, #8] \n\t" \ 3307 "ldr r2, [%1, #12] \n\t" \ 3308 "ldr r3, [%1, #16] \n\t" \ 3309 "ldr r4, [%1] \n\t" /* target->r4 */ \ 3310 VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ 3311 VALGRIND_RESTORE_STACK \ 3312 "mov %0, r0" \ 3313 : /*out*/ "=r" (_res) \ 3314 : /*in*/ "0" (&_argvec[0]) \ 3315 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r11" \ 3316 ); \ 3317 lval = (__typeof__(lval)) _res; \ 3318 } while (0) 3319 3320#endif /* PLAT_arm_linux */ 3321 3322/* ------------------------- s390x-linux ------------------------- */ 3323 3324#if defined(PLAT_s390x_linux) 3325 3326/* Similar workaround as amd64 (see above), but we use r11 as frame 3327 pointer and save the old r11 in r7. r11 might be used for 3328 argvec, therefore we copy argvec in r1 since r1 is clobbered 3329 after the call anyway. */ 3330#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) 3331# define __FRAME_POINTER \ 3332 ,"d"(__builtin_dwarf_cfa()) 3333# define VALGRIND_CFI_PROLOGUE \ 3334 ".cfi_remember_state\n\t" \ 3335 "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \ 3336 "lgr 7,11\n\t" \ 3337 "lgr 11,%2\n\t" \ 3338 ".cfi_def_cfa r11, 0\n\t" 3339# define VALGRIND_CFI_EPILOGUE \ 3340 "lgr 11, 7\n\t" \ 3341 ".cfi_restore_state\n\t" 3342#else 3343# define __FRAME_POINTER 3344# define VALGRIND_CFI_PROLOGUE \ 3345 "lgr 1,%1\n\t" 3346# define VALGRIND_CFI_EPILOGUE 3347#endif 3348 3349 3350 3351 3352/* These regs are trashed by the hidden call. 
Note that we overwrite 3353 r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the 3354 function a proper return address. All others are ABI defined call 3355 clobbers. */ 3356#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \ 3357 "f0","f1","f2","f3","f4","f5","f6","f7" 3358 3359 3360#define CALL_FN_W_v(lval, orig) \ 3361 do { \ 3362 volatile OrigFn _orig = (orig); \ 3363 volatile unsigned long _argvec[1]; \ 3364 volatile unsigned long _res; \ 3365 _argvec[0] = (unsigned long)_orig.nraddr; \ 3366 __asm__ volatile( \ 3367 VALGRIND_CFI_PROLOGUE \ 3368 "aghi 15,-160\n\t" \ 3369 "lg 1, 0(1)\n\t" /* target->r1 */ \ 3370 VALGRIND_CALL_NOREDIR_R1 \ 3371 "lgr %0, 2\n\t" \ 3372 "aghi 15,160\n\t" \ 3373 VALGRIND_CFI_EPILOGUE \ 3374 : /*out*/ "=d" (_res) \ 3375 : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \ 3376 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ 3377 ); \ 3378 lval = (__typeof__(lval)) _res; \ 3379 } while (0) 3380 3381/* The call abi has the arguments in r2-r6 and stack */ 3382#define CALL_FN_W_W(lval, orig, arg1) \ 3383 do { \ 3384 volatile OrigFn _orig = (orig); \ 3385 volatile unsigned long _argvec[2]; \ 3386 volatile unsigned long _res; \ 3387 _argvec[0] = (unsigned long)_orig.nraddr; \ 3388 _argvec[1] = (unsigned long)arg1; \ 3389 __asm__ volatile( \ 3390 VALGRIND_CFI_PROLOGUE \ 3391 "aghi 15,-160\n\t" \ 3392 "lg 2, 8(1)\n\t" \ 3393 "lg 1, 0(1)\n\t" \ 3394 VALGRIND_CALL_NOREDIR_R1 \ 3395 "lgr %0, 2\n\t" \ 3396 "aghi 15,160\n\t" \ 3397 VALGRIND_CFI_EPILOGUE \ 3398 : /*out*/ "=d" (_res) \ 3399 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3400 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ 3401 ); \ 3402 lval = (__typeof__(lval)) _res; \ 3403 } while (0) 3404 3405#define CALL_FN_W_WW(lval, orig, arg1, arg2) \ 3406 do { \ 3407 volatile OrigFn _orig = (orig); \ 3408 volatile unsigned long _argvec[3]; \ 3409 volatile unsigned long _res; \ 3410 _argvec[0] = (unsigned long)_orig.nraddr; \ 3411 _argvec[1] = (unsigned long)arg1; \ 
3412 _argvec[2] = (unsigned long)arg2; \ 3413 __asm__ volatile( \ 3414 VALGRIND_CFI_PROLOGUE \ 3415 "aghi 15,-160\n\t" \ 3416 "lg 2, 8(1)\n\t" \ 3417 "lg 3,16(1)\n\t" \ 3418 "lg 1, 0(1)\n\t" \ 3419 VALGRIND_CALL_NOREDIR_R1 \ 3420 "lgr %0, 2\n\t" \ 3421 "aghi 15,160\n\t" \ 3422 VALGRIND_CFI_EPILOGUE \ 3423 : /*out*/ "=d" (_res) \ 3424 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3425 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ 3426 ); \ 3427 lval = (__typeof__(lval)) _res; \ 3428 } while (0) 3429 3430#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \ 3431 do { \ 3432 volatile OrigFn _orig = (orig); \ 3433 volatile unsigned long _argvec[4]; \ 3434 volatile unsigned long _res; \ 3435 _argvec[0] = (unsigned long)_orig.nraddr; \ 3436 _argvec[1] = (unsigned long)arg1; \ 3437 _argvec[2] = (unsigned long)arg2; \ 3438 _argvec[3] = (unsigned long)arg3; \ 3439 __asm__ volatile( \ 3440 VALGRIND_CFI_PROLOGUE \ 3441 "aghi 15,-160\n\t" \ 3442 "lg 2, 8(1)\n\t" \ 3443 "lg 3,16(1)\n\t" \ 3444 "lg 4,24(1)\n\t" \ 3445 "lg 1, 0(1)\n\t" \ 3446 VALGRIND_CALL_NOREDIR_R1 \ 3447 "lgr %0, 2\n\t" \ 3448 "aghi 15,160\n\t" \ 3449 VALGRIND_CFI_EPILOGUE \ 3450 : /*out*/ "=d" (_res) \ 3451 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3452 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ 3453 ); \ 3454 lval = (__typeof__(lval)) _res; \ 3455 } while (0) 3456 3457#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \ 3458 do { \ 3459 volatile OrigFn _orig = (orig); \ 3460 volatile unsigned long _argvec[5]; \ 3461 volatile unsigned long _res; \ 3462 _argvec[0] = (unsigned long)_orig.nraddr; \ 3463 _argvec[1] = (unsigned long)arg1; \ 3464 _argvec[2] = (unsigned long)arg2; \ 3465 _argvec[3] = (unsigned long)arg3; \ 3466 _argvec[4] = (unsigned long)arg4; \ 3467 __asm__ volatile( \ 3468 VALGRIND_CFI_PROLOGUE \ 3469 "aghi 15,-160\n\t" \ 3470 "lg 2, 8(1)\n\t" \ 3471 "lg 3,16(1)\n\t" \ 3472 "lg 4,24(1)\n\t" \ 3473 "lg 5,32(1)\n\t" \ 3474 "lg 1, 0(1)\n\t" \ 3475 
VALGRIND_CALL_NOREDIR_R1 \ 3476 "lgr %0, 2\n\t" \ 3477 "aghi 15,160\n\t" \ 3478 VALGRIND_CFI_EPILOGUE \ 3479 : /*out*/ "=d" (_res) \ 3480 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3481 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ 3482 ); \ 3483 lval = (__typeof__(lval)) _res; \ 3484 } while (0) 3485 3486#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \ 3487 do { \ 3488 volatile OrigFn _orig = (orig); \ 3489 volatile unsigned long _argvec[6]; \ 3490 volatile unsigned long _res; \ 3491 _argvec[0] = (unsigned long)_orig.nraddr; \ 3492 _argvec[1] = (unsigned long)arg1; \ 3493 _argvec[2] = (unsigned long)arg2; \ 3494 _argvec[3] = (unsigned long)arg3; \ 3495 _argvec[4] = (unsigned long)arg4; \ 3496 _argvec[5] = (unsigned long)arg5; \ 3497 __asm__ volatile( \ 3498 VALGRIND_CFI_PROLOGUE \ 3499 "aghi 15,-160\n\t" \ 3500 "lg 2, 8(1)\n\t" \ 3501 "lg 3,16(1)\n\t" \ 3502 "lg 4,24(1)\n\t" \ 3503 "lg 5,32(1)\n\t" \ 3504 "lg 6,40(1)\n\t" \ 3505 "lg 1, 0(1)\n\t" \ 3506 VALGRIND_CALL_NOREDIR_R1 \ 3507 "lgr %0, 2\n\t" \ 3508 "aghi 15,160\n\t" \ 3509 VALGRIND_CFI_EPILOGUE \ 3510 : /*out*/ "=d" (_res) \ 3511 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3512 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ 3513 ); \ 3514 lval = (__typeof__(lval)) _res; \ 3515 } while (0) 3516 3517#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ 3518 arg6) \ 3519 do { \ 3520 volatile OrigFn _orig = (orig); \ 3521 volatile unsigned long _argvec[7]; \ 3522 volatile unsigned long _res; \ 3523 _argvec[0] = (unsigned long)_orig.nraddr; \ 3524 _argvec[1] = (unsigned long)arg1; \ 3525 _argvec[2] = (unsigned long)arg2; \ 3526 _argvec[3] = (unsigned long)arg3; \ 3527 _argvec[4] = (unsigned long)arg4; \ 3528 _argvec[5] = (unsigned long)arg5; \ 3529 _argvec[6] = (unsigned long)arg6; \ 3530 __asm__ volatile( \ 3531 VALGRIND_CFI_PROLOGUE \ 3532 "aghi 15,-168\n\t" \ 3533 "lg 2, 8(1)\n\t" \ 3534 "lg 3,16(1)\n\t" \ 3535 "lg 4,24(1)\n\t" \ 3536 "lg 5,32(1)\n\t" \ 3537 
"lg 6,40(1)\n\t" \ 3538 "mvc 160(8,15), 48(1)\n\t" \ 3539 "lg 1, 0(1)\n\t" \ 3540 VALGRIND_CALL_NOREDIR_R1 \ 3541 "lgr %0, 2\n\t" \ 3542 "aghi 15,168\n\t" \ 3543 VALGRIND_CFI_EPILOGUE \ 3544 : /*out*/ "=d" (_res) \ 3545 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3546 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ 3547 ); \ 3548 lval = (__typeof__(lval)) _res; \ 3549 } while (0) 3550 3551#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ 3552 arg6, arg7) \ 3553 do { \ 3554 volatile OrigFn _orig = (orig); \ 3555 volatile unsigned long _argvec[8]; \ 3556 volatile unsigned long _res; \ 3557 _argvec[0] = (unsigned long)_orig.nraddr; \ 3558 _argvec[1] = (unsigned long)arg1; \ 3559 _argvec[2] = (unsigned long)arg2; \ 3560 _argvec[3] = (unsigned long)arg3; \ 3561 _argvec[4] = (unsigned long)arg4; \ 3562 _argvec[5] = (unsigned long)arg5; \ 3563 _argvec[6] = (unsigned long)arg6; \ 3564 _argvec[7] = (unsigned long)arg7; \ 3565 __asm__ volatile( \ 3566 VALGRIND_CFI_PROLOGUE \ 3567 "aghi 15,-176\n\t" \ 3568 "lg 2, 8(1)\n\t" \ 3569 "lg 3,16(1)\n\t" \ 3570 "lg 4,24(1)\n\t" \ 3571 "lg 5,32(1)\n\t" \ 3572 "lg 6,40(1)\n\t" \ 3573 "mvc 160(8,15), 48(1)\n\t" \ 3574 "mvc 168(8,15), 56(1)\n\t" \ 3575 "lg 1, 0(1)\n\t" \ 3576 VALGRIND_CALL_NOREDIR_R1 \ 3577 "lgr %0, 2\n\t" \ 3578 "aghi 15,176\n\t" \ 3579 VALGRIND_CFI_EPILOGUE \ 3580 : /*out*/ "=d" (_res) \ 3581 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3582 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ 3583 ); \ 3584 lval = (__typeof__(lval)) _res; \ 3585 } while (0) 3586 3587#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ 3588 arg6, arg7 ,arg8) \ 3589 do { \ 3590 volatile OrigFn _orig = (orig); \ 3591 volatile unsigned long _argvec[9]; \ 3592 volatile unsigned long _res; \ 3593 _argvec[0] = (unsigned long)_orig.nraddr; \ 3594 _argvec[1] = (unsigned long)arg1; \ 3595 _argvec[2] = (unsigned long)arg2; \ 3596 _argvec[3] = (unsigned long)arg3; \ 3597 _argvec[4] = (unsigned long)arg4; 
\ 3598 _argvec[5] = (unsigned long)arg5; \ 3599 _argvec[6] = (unsigned long)arg6; \ 3600 _argvec[7] = (unsigned long)arg7; \ 3601 _argvec[8] = (unsigned long)arg8; \ 3602 __asm__ volatile( \ 3603 VALGRIND_CFI_PROLOGUE \ 3604 "aghi 15,-184\n\t" \ 3605 "lg 2, 8(1)\n\t" \ 3606 "lg 3,16(1)\n\t" \ 3607 "lg 4,24(1)\n\t" \ 3608 "lg 5,32(1)\n\t" \ 3609 "lg 6,40(1)\n\t" \ 3610 "mvc 160(8,15), 48(1)\n\t" \ 3611 "mvc 168(8,15), 56(1)\n\t" \ 3612 "mvc 176(8,15), 64(1)\n\t" \ 3613 "lg 1, 0(1)\n\t" \ 3614 VALGRIND_CALL_NOREDIR_R1 \ 3615 "lgr %0, 2\n\t" \ 3616 "aghi 15,184\n\t" \ 3617 VALGRIND_CFI_EPILOGUE \ 3618 : /*out*/ "=d" (_res) \ 3619 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3620 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ 3621 ); \ 3622 lval = (__typeof__(lval)) _res; \ 3623 } while (0) 3624 3625#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ 3626 arg6, arg7 ,arg8, arg9) \ 3627 do { \ 3628 volatile OrigFn _orig = (orig); \ 3629 volatile unsigned long _argvec[10]; \ 3630 volatile unsigned long _res; \ 3631 _argvec[0] = (unsigned long)_orig.nraddr; \ 3632 _argvec[1] = (unsigned long)arg1; \ 3633 _argvec[2] = (unsigned long)arg2; \ 3634 _argvec[3] = (unsigned long)arg3; \ 3635 _argvec[4] = (unsigned long)arg4; \ 3636 _argvec[5] = (unsigned long)arg5; \ 3637 _argvec[6] = (unsigned long)arg6; \ 3638 _argvec[7] = (unsigned long)arg7; \ 3639 _argvec[8] = (unsigned long)arg8; \ 3640 _argvec[9] = (unsigned long)arg9; \ 3641 __asm__ volatile( \ 3642 VALGRIND_CFI_PROLOGUE \ 3643 "aghi 15,-192\n\t" \ 3644 "lg 2, 8(1)\n\t" \ 3645 "lg 3,16(1)\n\t" \ 3646 "lg 4,24(1)\n\t" \ 3647 "lg 5,32(1)\n\t" \ 3648 "lg 6,40(1)\n\t" \ 3649 "mvc 160(8,15), 48(1)\n\t" \ 3650 "mvc 168(8,15), 56(1)\n\t" \ 3651 "mvc 176(8,15), 64(1)\n\t" \ 3652 "mvc 184(8,15), 72(1)\n\t" \ 3653 "lg 1, 0(1)\n\t" \ 3654 VALGRIND_CALL_NOREDIR_R1 \ 3655 "lgr %0, 2\n\t" \ 3656 "aghi 15,192\n\t" \ 3657 VALGRIND_CFI_EPILOGUE \ 3658 : /*out*/ "=d" (_res) \ 3659 : /*in*/ "a" (&_argvec[0]) 
__FRAME_POINTER \ 3660 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ 3661 ); \ 3662 lval = (__typeof__(lval)) _res; \ 3663 } while (0) 3664 3665#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ 3666 arg6, arg7 ,arg8, arg9, arg10) \ 3667 do { \ 3668 volatile OrigFn _orig = (orig); \ 3669 volatile unsigned long _argvec[11]; \ 3670 volatile unsigned long _res; \ 3671 _argvec[0] = (unsigned long)_orig.nraddr; \ 3672 _argvec[1] = (unsigned long)arg1; \ 3673 _argvec[2] = (unsigned long)arg2; \ 3674 _argvec[3] = (unsigned long)arg3; \ 3675 _argvec[4] = (unsigned long)arg4; \ 3676 _argvec[5] = (unsigned long)arg5; \ 3677 _argvec[6] = (unsigned long)arg6; \ 3678 _argvec[7] = (unsigned long)arg7; \ 3679 _argvec[8] = (unsigned long)arg8; \ 3680 _argvec[9] = (unsigned long)arg9; \ 3681 _argvec[10] = (unsigned long)arg10; \ 3682 __asm__ volatile( \ 3683 VALGRIND_CFI_PROLOGUE \ 3684 "aghi 15,-200\n\t" \ 3685 "lg 2, 8(1)\n\t" \ 3686 "lg 3,16(1)\n\t" \ 3687 "lg 4,24(1)\n\t" \ 3688 "lg 5,32(1)\n\t" \ 3689 "lg 6,40(1)\n\t" \ 3690 "mvc 160(8,15), 48(1)\n\t" \ 3691 "mvc 168(8,15), 56(1)\n\t" \ 3692 "mvc 176(8,15), 64(1)\n\t" \ 3693 "mvc 184(8,15), 72(1)\n\t" \ 3694 "mvc 192(8,15), 80(1)\n\t" \ 3695 "lg 1, 0(1)\n\t" \ 3696 VALGRIND_CALL_NOREDIR_R1 \ 3697 "lgr %0, 2\n\t" \ 3698 "aghi 15,200\n\t" \ 3699 VALGRIND_CFI_EPILOGUE \ 3700 : /*out*/ "=d" (_res) \ 3701 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3702 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ 3703 ); \ 3704 lval = (__typeof__(lval)) _res; \ 3705 } while (0) 3706 3707#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ 3708 arg6, arg7 ,arg8, arg9, arg10, arg11) \ 3709 do { \ 3710 volatile OrigFn _orig = (orig); \ 3711 volatile unsigned long _argvec[12]; \ 3712 volatile unsigned long _res; \ 3713 _argvec[0] = (unsigned long)_orig.nraddr; \ 3714 _argvec[1] = (unsigned long)arg1; \ 3715 _argvec[2] = (unsigned long)arg2; \ 3716 _argvec[3] = (unsigned long)arg3; \ 3717 _argvec[4] 
= (unsigned long)arg4; \ 3718 _argvec[5] = (unsigned long)arg5; \ 3719 _argvec[6] = (unsigned long)arg6; \ 3720 _argvec[7] = (unsigned long)arg7; \ 3721 _argvec[8] = (unsigned long)arg8; \ 3722 _argvec[9] = (unsigned long)arg9; \ 3723 _argvec[10] = (unsigned long)arg10; \ 3724 _argvec[11] = (unsigned long)arg11; \ 3725 __asm__ volatile( \ 3726 VALGRIND_CFI_PROLOGUE \ 3727 "aghi 15,-208\n\t" \ 3728 "lg 2, 8(1)\n\t" \ 3729 "lg 3,16(1)\n\t" \ 3730 "lg 4,24(1)\n\t" \ 3731 "lg 5,32(1)\n\t" \ 3732 "lg 6,40(1)\n\t" \ 3733 "mvc 160(8,15), 48(1)\n\t" \ 3734 "mvc 168(8,15), 56(1)\n\t" \ 3735 "mvc 176(8,15), 64(1)\n\t" \ 3736 "mvc 184(8,15), 72(1)\n\t" \ 3737 "mvc 192(8,15), 80(1)\n\t" \ 3738 "mvc 200(8,15), 88(1)\n\t" \ 3739 "lg 1, 0(1)\n\t" \ 3740 VALGRIND_CALL_NOREDIR_R1 \ 3741 "lgr %0, 2\n\t" \ 3742 "aghi 15,208\n\t" \ 3743 VALGRIND_CFI_EPILOGUE \ 3744 : /*out*/ "=d" (_res) \ 3745 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3746 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ 3747 ); \ 3748 lval = (__typeof__(lval)) _res; \ 3749 } while (0) 3750 3751#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ 3752 arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\ 3753 do { \ 3754 volatile OrigFn _orig = (orig); \ 3755 volatile unsigned long _argvec[13]; \ 3756 volatile unsigned long _res; \ 3757 _argvec[0] = (unsigned long)_orig.nraddr; \ 3758 _argvec[1] = (unsigned long)arg1; \ 3759 _argvec[2] = (unsigned long)arg2; \ 3760 _argvec[3] = (unsigned long)arg3; \ 3761 _argvec[4] = (unsigned long)arg4; \ 3762 _argvec[5] = (unsigned long)arg5; \ 3763 _argvec[6] = (unsigned long)arg6; \ 3764 _argvec[7] = (unsigned long)arg7; \ 3765 _argvec[8] = (unsigned long)arg8; \ 3766 _argvec[9] = (unsigned long)arg9; \ 3767 _argvec[10] = (unsigned long)arg10; \ 3768 _argvec[11] = (unsigned long)arg11; \ 3769 _argvec[12] = (unsigned long)arg12; \ 3770 __asm__ volatile( \ 3771 VALGRIND_CFI_PROLOGUE \ 3772 "aghi 15,-216\n\t" \ 3773 "lg 2, 8(1)\n\t" \ 3774 "lg 3,16(1)\n\t" \ 3775 
"lg 4,24(1)\n\t" \ 3776 "lg 5,32(1)\n\t" \ 3777 "lg 6,40(1)\n\t" \ 3778 "mvc 160(8,15), 48(1)\n\t" \ 3779 "mvc 168(8,15), 56(1)\n\t" \ 3780 "mvc 176(8,15), 64(1)\n\t" \ 3781 "mvc 184(8,15), 72(1)\n\t" \ 3782 "mvc 192(8,15), 80(1)\n\t" \ 3783 "mvc 200(8,15), 88(1)\n\t" \ 3784 "mvc 208(8,15), 96(1)\n\t" \ 3785 "lg 1, 0(1)\n\t" \ 3786 VALGRIND_CALL_NOREDIR_R1 \ 3787 "lgr %0, 2\n\t" \ 3788 "aghi 15,216\n\t" \ 3789 VALGRIND_CFI_EPILOGUE \ 3790 : /*out*/ "=d" (_res) \ 3791 : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ 3792 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ 3793 ); \ 3794 lval = (__typeof__(lval)) _res; \ 3795 } while (0) 3796 3797 3798#endif /* PLAT_s390x_linux */ 3799 3800/* ------------------------- mips-linux ------------------------- */ 3801 3802#if defined(PLAT_mips32_linux) 3803 3804/* These regs are trashed by the hidden call. */ 3805#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ 3806"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ 3807"$25", "$31" 3808 3809/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned 3810 long) == 4. 
*/ 3811 3812#define CALL_FN_W_v(lval, orig) \ 3813 do { \ 3814 volatile OrigFn _orig = (orig); \ 3815 volatile unsigned long _argvec[1]; \ 3816 volatile unsigned long _res; \ 3817 _argvec[0] = (unsigned long)_orig.nraddr; \ 3818 __asm__ volatile( \ 3819 "subu $29, $29, 8 \n\t" \ 3820 "sw $gp, 0($sp) \n\t" \ 3821 "sw $ra, 4($sp) \n\t" \ 3822 "subu $29, $29, 16 \n\t" \ 3823 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 3824 VALGRIND_CALL_NOREDIR_T9 \ 3825 "addu $29, $29, 16\n\t" \ 3826 "lw $gp, 0($sp) \n\t" \ 3827 "lw $ra, 4($sp) \n\t" \ 3828 "addu $29, $29, 8 \n\t" \ 3829 "move %0, $v0\n" \ 3830 : /*out*/ "=r" (_res) \ 3831 : /*in*/ "0" (&_argvec[0]) \ 3832 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 3833 ); \ 3834 lval = (__typeof__(lval)) _res; \ 3835 } while (0) 3836 3837#define CALL_FN_W_W(lval, orig, arg1) \ 3838 do { \ 3839 volatile OrigFn _orig = (orig); \ 3840 volatile unsigned long _argvec[2]; \ 3841 volatile unsigned long _res; \ 3842 _argvec[0] = (unsigned long)_orig.nraddr; \ 3843 _argvec[1] = (unsigned long)(arg1); \ 3844 __asm__ volatile( \ 3845 "subu $29, $29, 8 \n\t" \ 3846 "sw $gp, 0($sp) \n\t" \ 3847 "sw $ra, 4($sp) \n\t" \ 3848 "subu $29, $29, 16 \n\t" \ 3849 "lw $a0, 4(%1) \n\t" /* arg1*/ \ 3850 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 3851 VALGRIND_CALL_NOREDIR_T9 \ 3852 "addu $29, $29, 16 \n\t" \ 3853 "lw $gp, 0($sp) \n\t" \ 3854 "lw $ra, 4($sp) \n\t" \ 3855 "addu $29, $29, 8 \n\t" \ 3856 "move %0, $v0\n" \ 3857 : /*out*/ "=r" (_res) \ 3858 : /*in*/ "0" (&_argvec[0]) \ 3859 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 3860 ); \ 3861 lval = (__typeof__(lval)) _res; \ 3862 } while (0) 3863 3864#define CALL_FN_W_WW(lval, orig, arg1,arg2) \ 3865 do { \ 3866 volatile OrigFn _orig = (orig); \ 3867 volatile unsigned long _argvec[3]; \ 3868 volatile unsigned long _res; \ 3869 _argvec[0] = (unsigned long)_orig.nraddr; \ 3870 _argvec[1] = (unsigned long)(arg1); \ 3871 _argvec[2] = (unsigned long)(arg2); \ 3872 __asm__ volatile( \ 3873 "subu $29, $29, 
8 \n\t" \ 3874 "sw $gp, 0($sp) \n\t" \ 3875 "sw $ra, 4($sp) \n\t" \ 3876 "subu $29, $29, 16 \n\t" \ 3877 "lw $a0, 4(%1) \n\t" \ 3878 "lw $a1, 8(%1) \n\t" \ 3879 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 3880 VALGRIND_CALL_NOREDIR_T9 \ 3881 "addu $29, $29, 16 \n\t" \ 3882 "lw $gp, 0($sp) \n\t" \ 3883 "lw $ra, 4($sp) \n\t" \ 3884 "addu $29, $29, 8 \n\t" \ 3885 "move %0, $v0\n" \ 3886 : /*out*/ "=r" (_res) \ 3887 : /*in*/ "0" (&_argvec[0]) \ 3888 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 3889 ); \ 3890 lval = (__typeof__(lval)) _res; \ 3891 } while (0) 3892 3893#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ 3894 do { \ 3895 volatile OrigFn _orig = (orig); \ 3896 volatile unsigned long _argvec[4]; \ 3897 volatile unsigned long _res; \ 3898 _argvec[0] = (unsigned long)_orig.nraddr; \ 3899 _argvec[1] = (unsigned long)(arg1); \ 3900 _argvec[2] = (unsigned long)(arg2); \ 3901 _argvec[3] = (unsigned long)(arg3); \ 3902 __asm__ volatile( \ 3903 "subu $29, $29, 8 \n\t" \ 3904 "sw $gp, 0($sp) \n\t" \ 3905 "sw $ra, 4($sp) \n\t" \ 3906 "subu $29, $29, 16 \n\t" \ 3907 "lw $a0, 4(%1) \n\t" \ 3908 "lw $a1, 8(%1) \n\t" \ 3909 "lw $a2, 12(%1) \n\t" \ 3910 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 3911 VALGRIND_CALL_NOREDIR_T9 \ 3912 "addu $29, $29, 16 \n\t" \ 3913 "lw $gp, 0($sp) \n\t" \ 3914 "lw $ra, 4($sp) \n\t" \ 3915 "addu $29, $29, 8 \n\t" \ 3916 "move %0, $v0\n" \ 3917 : /*out*/ "=r" (_res) \ 3918 : /*in*/ "0" (&_argvec[0]) \ 3919 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 3920 ); \ 3921 lval = (__typeof__(lval)) _res; \ 3922 } while (0) 3923 3924#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ 3925 do { \ 3926 volatile OrigFn _orig = (orig); \ 3927 volatile unsigned long _argvec[5]; \ 3928 volatile unsigned long _res; \ 3929 _argvec[0] = (unsigned long)_orig.nraddr; \ 3930 _argvec[1] = (unsigned long)(arg1); \ 3931 _argvec[2] = (unsigned long)(arg2); \ 3932 _argvec[3] = (unsigned long)(arg3); \ 3933 _argvec[4] = (unsigned long)(arg4); \ 3934 __asm__ 
volatile( \ 3935 "subu $29, $29, 8 \n\t" \ 3936 "sw $gp, 0($sp) \n\t" \ 3937 "sw $ra, 4($sp) \n\t" \ 3938 "subu $29, $29, 16 \n\t" \ 3939 "lw $a0, 4(%1) \n\t" \ 3940 "lw $a1, 8(%1) \n\t" \ 3941 "lw $a2, 12(%1) \n\t" \ 3942 "lw $a3, 16(%1) \n\t" \ 3943 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 3944 VALGRIND_CALL_NOREDIR_T9 \ 3945 "addu $29, $29, 16 \n\t" \ 3946 "lw $gp, 0($sp) \n\t" \ 3947 "lw $ra, 4($sp) \n\t" \ 3948 "addu $29, $29, 8 \n\t" \ 3949 "move %0, $v0\n" \ 3950 : /*out*/ "=r" (_res) \ 3951 : /*in*/ "0" (&_argvec[0]) \ 3952 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 3953 ); \ 3954 lval = (__typeof__(lval)) _res; \ 3955 } while (0) 3956 3957#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ 3958 do { \ 3959 volatile OrigFn _orig = (orig); \ 3960 volatile unsigned long _argvec[6]; \ 3961 volatile unsigned long _res; \ 3962 _argvec[0] = (unsigned long)_orig.nraddr; \ 3963 _argvec[1] = (unsigned long)(arg1); \ 3964 _argvec[2] = (unsigned long)(arg2); \ 3965 _argvec[3] = (unsigned long)(arg3); \ 3966 _argvec[4] = (unsigned long)(arg4); \ 3967 _argvec[5] = (unsigned long)(arg5); \ 3968 __asm__ volatile( \ 3969 "subu $29, $29, 8 \n\t" \ 3970 "sw $gp, 0($sp) \n\t" \ 3971 "sw $ra, 4($sp) \n\t" \ 3972 "lw $a0, 20(%1) \n\t" \ 3973 "subu $sp, $sp, 24\n\t" \ 3974 "sw $a0, 16($sp) \n\t" \ 3975 "lw $a0, 4(%1) \n\t" \ 3976 "lw $a1, 8(%1) \n\t" \ 3977 "lw $a2, 12(%1) \n\t" \ 3978 "lw $a3, 16(%1) \n\t" \ 3979 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 3980 VALGRIND_CALL_NOREDIR_T9 \ 3981 "addu $29, $29, 24 \n\t" \ 3982 "lw $gp, 0($sp) \n\t" \ 3983 "lw $ra, 4($sp) \n\t" \ 3984 "addu $sp, $sp, 8 \n\t" \ 3985 "move %0, $v0\n" \ 3986 : /*out*/ "=r" (_res) \ 3987 : /*in*/ "0" (&_argvec[0]) \ 3988 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 3989 ); \ 3990 lval = (__typeof__(lval)) _res; \ 3991 } while (0) 3992#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ 3993 do { \ 3994 volatile OrigFn _orig = (orig); \ 3995 volatile unsigned long 
_argvec[7]; \ 3996 volatile unsigned long _res; \ 3997 _argvec[0] = (unsigned long)_orig.nraddr; \ 3998 _argvec[1] = (unsigned long)(arg1); \ 3999 _argvec[2] = (unsigned long)(arg2); \ 4000 _argvec[3] = (unsigned long)(arg3); \ 4001 _argvec[4] = (unsigned long)(arg4); \ 4002 _argvec[5] = (unsigned long)(arg5); \ 4003 _argvec[6] = (unsigned long)(arg6); \ 4004 __asm__ volatile( \ 4005 "subu $29, $29, 8 \n\t" \ 4006 "sw $gp, 0($sp) \n\t" \ 4007 "sw $ra, 4($sp) \n\t" \ 4008 "lw $a0, 20(%1) \n\t" \ 4009 "subu $sp, $sp, 32\n\t" \ 4010 "sw $a0, 16($sp) \n\t" \ 4011 "lw $a0, 24(%1) \n\t" \ 4012 "nop\n\t" \ 4013 "sw $a0, 20($sp) \n\t" \ 4014 "lw $a0, 4(%1) \n\t" \ 4015 "lw $a1, 8(%1) \n\t" \ 4016 "lw $a2, 12(%1) \n\t" \ 4017 "lw $a3, 16(%1) \n\t" \ 4018 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 4019 VALGRIND_CALL_NOREDIR_T9 \ 4020 "addu $sp, $sp, 32 \n\t" \ 4021 "lw $gp, 0($sp) \n\t" \ 4022 "lw $ra, 4($sp) \n\t" \ 4023 "addu $sp, $sp, 8 \n\t" \ 4024 "move %0, $v0\n" \ 4025 : /*out*/ "=r" (_res) \ 4026 : /*in*/ "0" (&_argvec[0]) \ 4027 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 4028 ); \ 4029 lval = (__typeof__(lval)) _res; \ 4030 } while (0) 4031 4032#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 4033 arg7) \ 4034 do { \ 4035 volatile OrigFn _orig = (orig); \ 4036 volatile unsigned long _argvec[8]; \ 4037 volatile unsigned long _res; \ 4038 _argvec[0] = (unsigned long)_orig.nraddr; \ 4039 _argvec[1] = (unsigned long)(arg1); \ 4040 _argvec[2] = (unsigned long)(arg2); \ 4041 _argvec[3] = (unsigned long)(arg3); \ 4042 _argvec[4] = (unsigned long)(arg4); \ 4043 _argvec[5] = (unsigned long)(arg5); \ 4044 _argvec[6] = (unsigned long)(arg6); \ 4045 _argvec[7] = (unsigned long)(arg7); \ 4046 __asm__ volatile( \ 4047 "subu $29, $29, 8 \n\t" \ 4048 "sw $gp, 0($sp) \n\t" \ 4049 "sw $ra, 4($sp) \n\t" \ 4050 "lw $a0, 20(%1) \n\t" \ 4051 "subu $sp, $sp, 32\n\t" \ 4052 "sw $a0, 16($sp) \n\t" \ 4053 "lw $a0, 24(%1) \n\t" \ 4054 "sw $a0, 20($sp) \n\t" \ 4055 "lw 
$a0, 28(%1) \n\t" \ 4056 "sw $a0, 24($sp) \n\t" \ 4057 "lw $a0, 4(%1) \n\t" \ 4058 "lw $a1, 8(%1) \n\t" \ 4059 "lw $a2, 12(%1) \n\t" \ 4060 "lw $a3, 16(%1) \n\t" \ 4061 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 4062 VALGRIND_CALL_NOREDIR_T9 \ 4063 "addu $sp, $sp, 32 \n\t" \ 4064 "lw $gp, 0($sp) \n\t" \ 4065 "lw $ra, 4($sp) \n\t" \ 4066 "addu $sp, $sp, 8 \n\t" \ 4067 "move %0, $v0\n" \ 4068 : /*out*/ "=r" (_res) \ 4069 : /*in*/ "0" (&_argvec[0]) \ 4070 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 4071 ); \ 4072 lval = (__typeof__(lval)) _res; \ 4073 } while (0) 4074 4075#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 4076 arg7,arg8) \ 4077 do { \ 4078 volatile OrigFn _orig = (orig); \ 4079 volatile unsigned long _argvec[9]; \ 4080 volatile unsigned long _res; \ 4081 _argvec[0] = (unsigned long)_orig.nraddr; \ 4082 _argvec[1] = (unsigned long)(arg1); \ 4083 _argvec[2] = (unsigned long)(arg2); \ 4084 _argvec[3] = (unsigned long)(arg3); \ 4085 _argvec[4] = (unsigned long)(arg4); \ 4086 _argvec[5] = (unsigned long)(arg5); \ 4087 _argvec[6] = (unsigned long)(arg6); \ 4088 _argvec[7] = (unsigned long)(arg7); \ 4089 _argvec[8] = (unsigned long)(arg8); \ 4090 __asm__ volatile( \ 4091 "subu $29, $29, 8 \n\t" \ 4092 "sw $gp, 0($sp) \n\t" \ 4093 "sw $ra, 4($sp) \n\t" \ 4094 "lw $a0, 20(%1) \n\t" \ 4095 "subu $sp, $sp, 40\n\t" \ 4096 "sw $a0, 16($sp) \n\t" \ 4097 "lw $a0, 24(%1) \n\t" \ 4098 "sw $a0, 20($sp) \n\t" \ 4099 "lw $a0, 28(%1) \n\t" \ 4100 "sw $a0, 24($sp) \n\t" \ 4101 "lw $a0, 32(%1) \n\t" \ 4102 "sw $a0, 28($sp) \n\t" \ 4103 "lw $a0, 4(%1) \n\t" \ 4104 "lw $a1, 8(%1) \n\t" \ 4105 "lw $a2, 12(%1) \n\t" \ 4106 "lw $a3, 16(%1) \n\t" \ 4107 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 4108 VALGRIND_CALL_NOREDIR_T9 \ 4109 "addu $sp, $sp, 40 \n\t" \ 4110 "lw $gp, 0($sp) \n\t" \ 4111 "lw $ra, 4($sp) \n\t" \ 4112 "addu $sp, $sp, 8 \n\t" \ 4113 "move %0, $v0\n" \ 4114 : /*out*/ "=r" (_res) \ 4115 : /*in*/ "0" (&_argvec[0]) \ 4116 : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS \ 4117 ); \ 4118 lval = (__typeof__(lval)) _res; \ 4119 } while (0) 4120 4121#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 4122 arg7,arg8,arg9) \ 4123 do { \ 4124 volatile OrigFn _orig = (orig); \ 4125 volatile unsigned long _argvec[10]; \ 4126 volatile unsigned long _res; \ 4127 _argvec[0] = (unsigned long)_orig.nraddr; \ 4128 _argvec[1] = (unsigned long)(arg1); \ 4129 _argvec[2] = (unsigned long)(arg2); \ 4130 _argvec[3] = (unsigned long)(arg3); \ 4131 _argvec[4] = (unsigned long)(arg4); \ 4132 _argvec[5] = (unsigned long)(arg5); \ 4133 _argvec[6] = (unsigned long)(arg6); \ 4134 _argvec[7] = (unsigned long)(arg7); \ 4135 _argvec[8] = (unsigned long)(arg8); \ 4136 _argvec[9] = (unsigned long)(arg9); \ 4137 __asm__ volatile( \ 4138 "subu $29, $29, 8 \n\t" \ 4139 "sw $gp, 0($sp) \n\t" \ 4140 "sw $ra, 4($sp) \n\t" \ 4141 "lw $a0, 20(%1) \n\t" \ 4142 "subu $sp, $sp, 40\n\t" \ 4143 "sw $a0, 16($sp) \n\t" \ 4144 "lw $a0, 24(%1) \n\t" \ 4145 "sw $a0, 20($sp) \n\t" \ 4146 "lw $a0, 28(%1) \n\t" \ 4147 "sw $a0, 24($sp) \n\t" \ 4148 "lw $a0, 32(%1) \n\t" \ 4149 "sw $a0, 28($sp) \n\t" \ 4150 "lw $a0, 36(%1) \n\t" \ 4151 "sw $a0, 32($sp) \n\t" \ 4152 "lw $a0, 4(%1) \n\t" \ 4153 "lw $a1, 8(%1) \n\t" \ 4154 "lw $a2, 12(%1) \n\t" \ 4155 "lw $a3, 16(%1) \n\t" \ 4156 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 4157 VALGRIND_CALL_NOREDIR_T9 \ 4158 "addu $sp, $sp, 40 \n\t" \ 4159 "lw $gp, 0($sp) \n\t" \ 4160 "lw $ra, 4($sp) \n\t" \ 4161 "addu $sp, $sp, 8 \n\t" \ 4162 "move %0, $v0\n" \ 4163 : /*out*/ "=r" (_res) \ 4164 : /*in*/ "0" (&_argvec[0]) \ 4165 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 4166 ); \ 4167 lval = (__typeof__(lval)) _res; \ 4168 } while (0) 4169 4170#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ 4171 arg7,arg8,arg9,arg10) \ 4172 do { \ 4173 volatile OrigFn _orig = (orig); \ 4174 volatile unsigned long _argvec[11]; \ 4175 volatile unsigned long _res; \ 4176 _argvec[0] = (unsigned long)_orig.nraddr; \ 
4177 _argvec[1] = (unsigned long)(arg1); \ 4178 _argvec[2] = (unsigned long)(arg2); \ 4179 _argvec[3] = (unsigned long)(arg3); \ 4180 _argvec[4] = (unsigned long)(arg4); \ 4181 _argvec[5] = (unsigned long)(arg5); \ 4182 _argvec[6] = (unsigned long)(arg6); \ 4183 _argvec[7] = (unsigned long)(arg7); \ 4184 _argvec[8] = (unsigned long)(arg8); \ 4185 _argvec[9] = (unsigned long)(arg9); \ 4186 _argvec[10] = (unsigned long)(arg10); \ 4187 __asm__ volatile( \ 4188 "subu $29, $29, 8 \n\t" \ 4189 "sw $gp, 0($sp) \n\t" \ 4190 "sw $ra, 4($sp) \n\t" \ 4191 "lw $a0, 20(%1) \n\t" \ 4192 "subu $sp, $sp, 48\n\t" \ 4193 "sw $a0, 16($sp) \n\t" \ 4194 "lw $a0, 24(%1) \n\t" \ 4195 "sw $a0, 20($sp) \n\t" \ 4196 "lw $a0, 28(%1) \n\t" \ 4197 "sw $a0, 24($sp) \n\t" \ 4198 "lw $a0, 32(%1) \n\t" \ 4199 "sw $a0, 28($sp) \n\t" \ 4200 "lw $a0, 36(%1) \n\t" \ 4201 "sw $a0, 32($sp) \n\t" \ 4202 "lw $a0, 40(%1) \n\t" \ 4203 "sw $a0, 36($sp) \n\t" \ 4204 "lw $a0, 4(%1) \n\t" \ 4205 "lw $a1, 8(%1) \n\t" \ 4206 "lw $a2, 12(%1) \n\t" \ 4207 "lw $a3, 16(%1) \n\t" \ 4208 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 4209 VALGRIND_CALL_NOREDIR_T9 \ 4210 "addu $sp, $sp, 48 \n\t" \ 4211 "lw $gp, 0($sp) \n\t" \ 4212 "lw $ra, 4($sp) \n\t" \ 4213 "addu $sp, $sp, 8 \n\t" \ 4214 "move %0, $v0\n" \ 4215 : /*out*/ "=r" (_res) \ 4216 : /*in*/ "0" (&_argvec[0]) \ 4217 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 4218 ); \ 4219 lval = (__typeof__(lval)) _res; \ 4220 } while (0) 4221 4222#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ 4223 arg6,arg7,arg8,arg9,arg10, \ 4224 arg11) \ 4225 do { \ 4226 volatile OrigFn _orig = (orig); \ 4227 volatile unsigned long _argvec[12]; \ 4228 volatile unsigned long _res; \ 4229 _argvec[0] = (unsigned long)_orig.nraddr; \ 4230 _argvec[1] = (unsigned long)(arg1); \ 4231 _argvec[2] = (unsigned long)(arg2); \ 4232 _argvec[3] = (unsigned long)(arg3); \ 4233 _argvec[4] = (unsigned long)(arg4); \ 4234 _argvec[5] = (unsigned long)(arg5); \ 4235 _argvec[6] = (unsigned 
long)(arg6); \ 4236 _argvec[7] = (unsigned long)(arg7); \ 4237 _argvec[8] = (unsigned long)(arg8); \ 4238 _argvec[9] = (unsigned long)(arg9); \ 4239 _argvec[10] = (unsigned long)(arg10); \ 4240 _argvec[11] = (unsigned long)(arg11); \ 4241 __asm__ volatile( \ 4242 "subu $29, $29, 8 \n\t" \ 4243 "sw $gp, 0($sp) \n\t" \ 4244 "sw $ra, 4($sp) \n\t" \ 4245 "lw $a0, 20(%1) \n\t" \ 4246 "subu $sp, $sp, 48\n\t" \ 4247 "sw $a0, 16($sp) \n\t" \ 4248 "lw $a0, 24(%1) \n\t" \ 4249 "sw $a0, 20($sp) \n\t" \ 4250 "lw $a0, 28(%1) \n\t" \ 4251 "sw $a0, 24($sp) \n\t" \ 4252 "lw $a0, 32(%1) \n\t" \ 4253 "sw $a0, 28($sp) \n\t" \ 4254 "lw $a0, 36(%1) \n\t" \ 4255 "sw $a0, 32($sp) \n\t" \ 4256 "lw $a0, 40(%1) \n\t" \ 4257 "sw $a0, 36($sp) \n\t" \ 4258 "lw $a0, 44(%1) \n\t" \ 4259 "sw $a0, 40($sp) \n\t" \ 4260 "lw $a0, 4(%1) \n\t" \ 4261 "lw $a1, 8(%1) \n\t" \ 4262 "lw $a2, 12(%1) \n\t" \ 4263 "lw $a3, 16(%1) \n\t" \ 4264 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 4265 VALGRIND_CALL_NOREDIR_T9 \ 4266 "addu $sp, $sp, 48 \n\t" \ 4267 "lw $gp, 0($sp) \n\t" \ 4268 "lw $ra, 4($sp) \n\t" \ 4269 "addu $sp, $sp, 8 \n\t" \ 4270 "move %0, $v0\n" \ 4271 : /*out*/ "=r" (_res) \ 4272 : /*in*/ "0" (&_argvec[0]) \ 4273 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 4274 ); \ 4275 lval = (__typeof__(lval)) _res; \ 4276 } while (0) 4277 4278#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ 4279 arg6,arg7,arg8,arg9,arg10, \ 4280 arg11,arg12) \ 4281 do { \ 4282 volatile OrigFn _orig = (orig); \ 4283 volatile unsigned long _argvec[13]; \ 4284 volatile unsigned long _res; \ 4285 _argvec[0] = (unsigned long)_orig.nraddr; \ 4286 _argvec[1] = (unsigned long)(arg1); \ 4287 _argvec[2] = (unsigned long)(arg2); \ 4288 _argvec[3] = (unsigned long)(arg3); \ 4289 _argvec[4] = (unsigned long)(arg4); \ 4290 _argvec[5] = (unsigned long)(arg5); \ 4291 _argvec[6] = (unsigned long)(arg6); \ 4292 _argvec[7] = (unsigned long)(arg7); \ 4293 _argvec[8] = (unsigned long)(arg8); \ 4294 _argvec[9] = (unsigned 
long)(arg9); \ 4295 _argvec[10] = (unsigned long)(arg10); \ 4296 _argvec[11] = (unsigned long)(arg11); \ 4297 _argvec[12] = (unsigned long)(arg12); \ 4298 __asm__ volatile( \ 4299 "subu $29, $29, 8 \n\t" \ 4300 "sw $gp, 0($sp) \n\t" \ 4301 "sw $ra, 4($sp) \n\t" \ 4302 "lw $a0, 20(%1) \n\t" \ 4303 "subu $sp, $sp, 56\n\t" \ 4304 "sw $a0, 16($sp) \n\t" \ 4305 "lw $a0, 24(%1) \n\t" \ 4306 "sw $a0, 20($sp) \n\t" \ 4307 "lw $a0, 28(%1) \n\t" \ 4308 "sw $a0, 24($sp) \n\t" \ 4309 "lw $a0, 32(%1) \n\t" \ 4310 "sw $a0, 28($sp) \n\t" \ 4311 "lw $a0, 36(%1) \n\t" \ 4312 "sw $a0, 32($sp) \n\t" \ 4313 "lw $a0, 40(%1) \n\t" \ 4314 "sw $a0, 36($sp) \n\t" \ 4315 "lw $a0, 44(%1) \n\t" \ 4316 "sw $a0, 40($sp) \n\t" \ 4317 "lw $a0, 48(%1) \n\t" \ 4318 "sw $a0, 44($sp) \n\t" \ 4319 "lw $a0, 4(%1) \n\t" \ 4320 "lw $a1, 8(%1) \n\t" \ 4321 "lw $a2, 12(%1) \n\t" \ 4322 "lw $a3, 16(%1) \n\t" \ 4323 "lw $t9, 0(%1) \n\t" /* target->t9 */ \ 4324 VALGRIND_CALL_NOREDIR_T9 \ 4325 "addu $sp, $sp, 56 \n\t" \ 4326 "lw $gp, 0($sp) \n\t" \ 4327 "lw $ra, 4($sp) \n\t" \ 4328 "addu $sp, $sp, 8 \n\t" \ 4329 "move %0, $v0\n" \ 4330 : /*out*/ "=r" (_res) \ 4331 : /*in*/ "0" (&_argvec[0]) \ 4332 : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \ 4333 ); \ 4334 lval = (__typeof__(lval)) _res; \ 4335 } while (0) 4336 4337#endif /* PLAT_mips32_linux */ 4338 4339 4340/* ------------------------------------------------------------------ */ 4341/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */ 4342/* */ 4343/* ------------------------------------------------------------------ */ 4344 4345/* Some request codes. There are many more of these, but most are not 4346 exposed to end-user view. These are the public ones, all of the 4347 form 0x1000 + small_number. 4348 4349 Core ones are in the range 0x00000000--0x0000ffff. The non-public 4350 ones start at 0x2000. 4351*/ 4352 4353/* These macros are used by tools -- they must be public, but don't 4354 embed them into other programs. 
*/ 4355#define VG_USERREQ_TOOL_BASE(a,b) \ 4356 ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16)) 4357#define VG_IS_TOOL_USERREQ(a, b, v) \ 4358 (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000)) 4359 4360/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 4361 This enum comprises an ABI exported by Valgrind to programs 4362 which use client requests. DO NOT CHANGE THE ORDER OF THESE 4363 ENTRIES, NOR DELETE ANY -- add new ones at the end. */ 4364typedef 4365 enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001, 4366 VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002, 4367 4368 /* These allow any function to be called from the simulated 4369 CPU but run on the real CPU. Nb: the first arg passed to 4370 the function is always the ThreadId of the running 4371 thread! So CLIENT_CALL0 actually requires a 1 arg 4372 function, etc. */ 4373 VG_USERREQ__CLIENT_CALL0 = 0x1101, 4374 VG_USERREQ__CLIENT_CALL1 = 0x1102, 4375 VG_USERREQ__CLIENT_CALL2 = 0x1103, 4376 VG_USERREQ__CLIENT_CALL3 = 0x1104, 4377 4378 /* Can be useful in regression testing suites -- eg. can 4379 send Valgrind's output to /dev/null and still count 4380 errors. */ 4381 VG_USERREQ__COUNT_ERRORS = 0x1201, 4382 4383 /* Allows a string (gdb monitor command) to be passed to the tool 4384 Used for interaction with vgdb/gdb */ 4385 VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202, 4386 4387 /* These are useful and can be interpreted by any tool that 4388 tracks malloc() et al, by using vg_replace_malloc.c. */ 4389 VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, 4390 VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b, 4391 VG_USERREQ__FREELIKE_BLOCK = 0x1302, 4392 /* Memory pool support. 
*/ 4393 VG_USERREQ__CREATE_MEMPOOL = 0x1303, 4394 VG_USERREQ__DESTROY_MEMPOOL = 0x1304, 4395 VG_USERREQ__MEMPOOL_ALLOC = 0x1305, 4396 VG_USERREQ__MEMPOOL_FREE = 0x1306, 4397 VG_USERREQ__MEMPOOL_TRIM = 0x1307, 4398 VG_USERREQ__MOVE_MEMPOOL = 0x1308, 4399 VG_USERREQ__MEMPOOL_CHANGE = 0x1309, 4400 VG_USERREQ__MEMPOOL_EXISTS = 0x130a, 4401 4402 /* Allow printfs to valgrind log. */ 4403 /* The first two pass the va_list argument by value, which 4404 assumes it is the same size as or smaller than a UWord, 4405 which generally isn't the case. Hence are deprecated. 4406 The second two pass the vargs by reference and so are 4407 immune to this problem. */ 4408 /* both :: char* fmt, va_list vargs (DEPRECATED) */ 4409 VG_USERREQ__PRINTF = 0x1401, 4410 VG_USERREQ__PRINTF_BACKTRACE = 0x1402, 4411 /* both :: char* fmt, va_list* vargs */ 4412 VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403, 4413 VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404, 4414 4415 /* Stack support. */ 4416 VG_USERREQ__STACK_REGISTER = 0x1501, 4417 VG_USERREQ__STACK_DEREGISTER = 0x1502, 4418 VG_USERREQ__STACK_CHANGE = 0x1503, 4419 4420 /* Wine support */ 4421 VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601, 4422 4423 /* Querying of debug info. */ 4424 VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701, 4425 4426 /* Disable/enable error reporting level. Takes a single 4427 Word arg which is the delta to this thread's error 4428 disablement indicator. Hence 1 disables or further 4429 disables errors, and -1 moves back towards enablement. 4430 Other values are not allowed. */ 4431 VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801 4432 } Vg_ClientRequest; 4433 4434#if !defined(__GNUC__) 4435# define __extension__ /* */ 4436#endif 4437 4438 4439/* Returns the number of Valgrinds this code is running under. That 4440 is, 0 if running natively, 1 if running under Valgrind, 2 if 4441 running under Valgrind which is running under another Valgrind, 4442 etc. 
*/ 4443#define RUNNING_ON_VALGRIND \ 4444 (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \ 4445 VG_USERREQ__RUNNING_ON_VALGRIND, \ 4446 0, 0, 0, 0, 0) \ 4447 4448 4449/* Discard translation of code in the range [_qzz_addr .. _qzz_addr + 4450 _qzz_len - 1]. Useful if you are debugging a JITter or some such, 4451 since it provides a way to make sure valgrind will retranslate the 4452 invalidated area. Returns no value. */ 4453#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ 4454 VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \ 4455 _qzz_addr, _qzz_len, 0, 0, 0) 4456 4457 4458/* These requests are for getting Valgrind itself to print something. 4459 Possibly with a backtrace. This is a really ugly hack. The return value 4460 is the number of characters printed, excluding the "**<pid>** " part at the 4461 start and the backtrace (if present). */ 4462 4463#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) 4464/* Modern GCC will optimize the static routine out if unused, 4465 and unused attribute will shut down warnings about it. */ 4466static int VALGRIND_PRINTF(const char *format, ...) 4467 __attribute__((format(__printf__, 1, 2), __unused__)); 4468#endif 4469static int 4470#if defined(_MSC_VER) 4471__inline 4472#endif 4473VALGRIND_PRINTF(const char *format, ...) 
4474{ 4475#if defined(NVALGRIND) 4476 return 0; 4477#else /* NVALGRIND */ 4478#if defined(_MSC_VER) 4479 uintptr_t _qzz_res; 4480#else 4481 unsigned long _qzz_res; 4482#endif 4483 va_list vargs; 4484 va_start(vargs, format); 4485#if defined(_MSC_VER) 4486 _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, 4487 VG_USERREQ__PRINTF_VALIST_BY_REF, 4488 (uintptr_t)format, 4489 (uintptr_t)&vargs, 4490 0, 0, 0); 4491#else 4492 _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, 4493 VG_USERREQ__PRINTF_VALIST_BY_REF, 4494 (unsigned long)format, 4495 (unsigned long)&vargs, 4496 0, 0, 0); 4497#endif 4498 va_end(vargs); 4499 return (int)_qzz_res; 4500#endif /* NVALGRIND */ 4501} 4502 4503#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) 4504static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) 4505 __attribute__((format(__printf__, 1, 2), __unused__)); 4506#endif 4507static int 4508#if defined(_MSC_VER) 4509__inline 4510#endif 4511VALGRIND_PRINTF_BACKTRACE(const char *format, ...) 4512{ 4513#if defined(NVALGRIND) 4514 return 0; 4515#else /* NVALGRIND */ 4516#if defined(_MSC_VER) 4517 uintptr_t _qzz_res; 4518#else 4519 unsigned long _qzz_res; 4520#endif 4521 va_list vargs; 4522 va_start(vargs, format); 4523#if defined(_MSC_VER) 4524 _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, 4525 VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, 4526 (uintptr_t)format, 4527 (uintptr_t)&vargs, 4528 0, 0, 0); 4529#else 4530 _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, 4531 VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, 4532 (unsigned long)format, 4533 (unsigned long)&vargs, 4534 0, 0, 0); 4535#endif 4536 va_end(vargs); 4537 return (int)_qzz_res; 4538#endif /* NVALGRIND */ 4539} 4540 4541 4542/* These requests allow control to move from the simulated CPU to the 4543 real CPU, calling an arbitary function. 4544 4545 Note that the current ThreadId is inserted as the first argument. 
   So this call:

      VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)

   requires f to have this signature:

      Word f(Word tid, Word arg1, Word arg2)

   where "Word" is a word-sized type.

   Note that these client requests are not entirely reliable.  For example,
   if you call a function with them that subsequently calls printf(),
   there's a high chance Valgrind will crash.  Generally, your prospects of
   these working are made higher if the called function does not refer to
   any global variables, and does not refer to any libc or other functions
   (printf et al).  Any kind of entanglement with libc or dynamic linking is
   likely to have a bad outcome, for tricky reasons which we've grappled
   with a lot in the past.
*/
/* Call _qyy_fn() on the real CPU with no caller-supplied arguments
   (the current ThreadId is still passed implicitly, per the comment
   above).  Evaluates to the function's word-sized result, or to the
   default 0 when not running under Valgrind. */
#define VALGRIND_NON_SIMD_CALL0(_qyy_fn)                          \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,       \
                                    VG_USERREQ__CLIENT_CALL0,     \
                                    _qyy_fn,                      \
                                    0, 0, 0, 0)

/* As VALGRIND_NON_SIMD_CALL0, with one caller-supplied argument. */
#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)               \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,       \
                                    VG_USERREQ__CLIENT_CALL1,     \
                                    _qyy_fn,                      \
                                    _qyy_arg1, 0, 0, 0)

/* As VALGRIND_NON_SIMD_CALL0, with two caller-supplied arguments. */
#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)    \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,       \
                                    VG_USERREQ__CLIENT_CALL2,     \
                                    _qyy_fn,                      \
                                    _qyy_arg1, _qyy_arg2, 0, 0)

/* As VALGRIND_NON_SIMD_CALL0, with three caller-supplied arguments. */
#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,       \
                                    VG_USERREQ__CLIENT_CALL3,     \
                                    _qyy_fn,                      \
                                    _qyy_arg1, _qyy_arg2,         \
                                    _qyy_arg3, 0)


/* Counts the number of errors that have been recorded by a tool.  Nb:
   the tool must record the errors with VG_(maybe_record_error)() or
   VG_(unique_error)() for them to be counted.
*/
#define VALGRIND_COUNT_ERRORS                                     \
    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(                    \
                               0 /* default return */,            \
                               VG_USERREQ__COUNT_ERRORS,          \
                               0, 0, 0, 0, 0)

/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
   when heap blocks are allocated in order to give accurate results.  This
   happens automatically for the standard allocator functions such as
   malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
   delete[], etc.

   But if your program uses a custom allocator, this doesn't automatically
   happen, and Valgrind will not do as well.  For example, if you allocate
   superblocks with mmap() and then allocate chunks of the superblocks, all
   Valgrind's observations will be at the mmap() level and it won't know that
   the chunks should be considered separate entities.  In Memcheck's case,
   that means you probably won't get heap block overrun detection (because
   there won't be redzones marked as unaddressable) and you definitely won't
   get any leak detection.

   The following client requests allow a custom allocator to be annotated so
   that it can be handled accurately by Valgrind.

   VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
   by a malloc()-like function.  For Memcheck (an illustrative case), this
   does two things:

   - It records that the block has been allocated.  This means any addresses
     within the block mentioned in error messages will be
     identified as belonging to the block.  It also means that if the block
     isn't freed it will be detected by the leak checker.

   - It marks the block as being addressable and undefined (if 'is_zeroed' is
     not set), or addressable and defined (if 'is_zeroed' is set).  This
     controls how accesses to the block by the program are handled.

   'addr' is the start of the usable block (ie. after any
   redzone), 'sizeB' is its size.  'rzB' is the redzone size if the allocator
   can apply redzones -- these are blocks of padding at the start and end of
   each block.  Adding redzones is recommended as it makes it much more likely
   Valgrind will spot block overruns.  `is_zeroed' indicates if the memory is
   zeroed (or filled with another predictable value), as is the case for
   calloc().

   VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
   heap block -- that will be used by the client program -- is allocated.
   It's best to put it at the outermost level of the allocator if possible;
   for example, if you have a function my_alloc() which calls
   internal_alloc(), and the client request is put inside internal_alloc(),
   stack traces relating to the heap block will contain entries for both
   my_alloc() and internal_alloc(), which is probably not what you want.

   For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
   custom blocks from within a heap block, B, that has been allocated with
   malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
   -- the custom blocks will take precedence.

   VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK.  For
   Memcheck, it does two things:

   - It records that the block has been deallocated.  This assumes that the
     block was annotated as having been allocated via
     VALGRIND_MALLOCLIKE_BLOCK.  Otherwise, an error will be issued.

   - It marks the block as being unaddressable.

   VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
   heap block is deallocated.

   VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation.  For
   Memcheck, it does four things:

   - It records that the size of a block has been changed.  This assumes that
     the block was annotated as having been allocated via
     VALGRIND_MALLOCLIKE_BLOCK.  Otherwise, an error will be issued.

   - If the block shrunk, it marks the freed memory as being unaddressable.

   - If the block grew, it marks the new area as undefined and defines a red
     zone past the end of the new block.

   - The V-bits of the overlap between the old and the new block are preserved.

   VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
   and before deallocation of the old block.

   In many cases, these three client requests will not be enough to get your
   allocator working well with Memcheck.  More specifically, if your allocator
   writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
   will be necessary to mark the memory as addressable just before the zeroing
   occurs, otherwise you'll get a lot of invalid write errors.  For example,
   you'll need to do this if your allocator recycles freed blocks, but it
   zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
   Alternatively, if your allocator reuses freed blocks for allocator-internal
   data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.

   Really, what's happening is a blurring of the lines between the client
   program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
   memory should be considered unaddressable to the client program, but the
   allocator knows more than the rest of the client program and so may be able
   to safely access it.  Extra client requests are necessary for Valgrind to
   understand the distinction between the allocator and the rest of the
   program.

   Ignored if addr == 0.
*/
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)          \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK,       \
                                    addr, sizeB, rzB, is_zeroed, 0)

/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
   Ignored if addr == 0.
*/
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)     \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK,    \
                                    addr, oldSizeB, newSizeB, rzB, 0)

/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
   Ignored if addr == 0.
*/
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)                              \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK,         \
                                    addr, rzB, 0, 0, 0)

/* Create a memory pool. */
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed)                   \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL,         \
                                    pool, rzB, is_zeroed, 0, 0)

/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL(pool)                                  \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL,        \
                                    pool, 0, 0, 0, 0)

/* Associate a piece of memory with a memory pool. */
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size)                        \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC,          \
                                    pool, addr, size, 0, 0)

/* Disassociate a piece of memory from a memory pool. */
#define VALGRIND_MEMPOOL_FREE(pool, addr)                               \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE,           \
                                    pool, addr, 0, 0, 0)

/* Disassociate any pieces outside a particular range. */
#define VALGRIND_MEMPOOL_TRIM(pool, addr, size)                         \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM,           \
                                    pool, addr, size, 0, 0)

/* Move a memory pool: the pool anchored at poolA becomes anchored at
   poolB.  NOTE(review): the original comment here duplicated the
   "Resize and/or move a piece" text of VALGRIND_MEMPOOL_CHANGE below,
   which cannot be right for this macro's two-pool-anchor signature --
   confirm against the Valgrind manual. */
#define VALGRIND_MOVE_MEMPOOL(poolA, poolB)                             \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL,           \
                                    poolA, poolB, 0, 0, 0)

/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size)               \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE,         \
                                    pool, addrA, addrB, size, 0)

/* Return 1 if a mempool exists, else 0.
*/
#define VALGRIND_MEMPOOL_EXISTS(pool)                              \
    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                   \
                               VG_USERREQ__MEMPOOL_EXISTS,         \
                               pool, 0, 0, 0, 0)

/* Mark a piece of memory as being a stack.  Returns a stack id.
   (Evaluates to the default 0 when not running under Valgrind.) */
#define VALGRIND_STACK_REGISTER(start, end)                        \
    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                   \
                               VG_USERREQ__STACK_REGISTER,         \
                               start, end, 0, 0, 0)

/* Unmark the piece of memory associated with a stack id as being a
   stack.  'id' is a value previously returned by
   VALGRIND_STACK_REGISTER. */
#define VALGRIND_STACK_DEREGISTER(id)                              \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER,  \
                                    id, 0, 0, 0, 0)

/* Change the start and end address of the stack id. */
#define VALGRIND_STACK_CHANGE(id, start, end)                      \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE,      \
                                    id, start, end, 0, 0)

/* Load PDB debug info for Wine PE image_map. */
#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta)    \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \
                                    fd, ptr, total_size, delta, 0)

/* Map a code address to a source file name and line number.  buf64
   must point to a 64-byte buffer in the caller's address space.  The
   result will be dumped in there and is guaranteed to be zero
   terminated.  If no info is found, the first byte is set to zero. */
#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64)                     \
    (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                   \
                               VG_USERREQ__MAP_IP_TO_SRCLOC,       \
                               addr, buf64, 0, 0, 0)

/* Disable error reporting for this thread.  Behaves in a stack like
   way, so you can safely call this multiple times provided that
   VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times
   to re-enable reporting.  The first call of this macro disables
   reporting.  Subsequent calls have no effect except to increase the
   number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable
   reporting.
   Child threads do not inherit this setting from their
   parents -- they are always created with reporting enabled. */
#define VALGRIND_DISABLE_ERROR_REPORTING                                \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
                                    1, 0, 0, 0, 0)

/* Re-enable error reporting, as per comments on
   VALGRIND_DISABLE_ERROR_REPORTING.  (Same request as above with -1:
   the disablement counter is decremented rather than incremented.) */
#define VALGRIND_ENABLE_ERROR_REPORTING                                 \
    VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
                                    -1, 0, 0, 0, 0)

/* The PLAT_* macros are internal to this header; undefine them so they
   do not leak into client code that includes valgrind.h. */
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
#undef PLAT_x86_win32
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64_linux
#undef PLAT_arm_linux
#undef PLAT_s390x_linux
#undef PLAT_mips32_linux

#endif /* __VALGRIND_H */