InterpC-x86.c revision b51ea11c70602918c42764bfafe92a997d3b1803
/*
 * This file was generated automatically by gen-mterp.py for 'x86'.
 *
 * --> DO NOT EDIT <--
 */

/* File: c/header.c */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* common includes */
#include "Dalvik.h"
#include "interp/InterpDefs.h"
#include "mterp/Mterp.h"
#include <math.h>                   // needed for fmod, fmodf

/*
 * Configuration defines.  These affect the C implementations, i.e. the
 * portable interpreter(s) and C stubs.
 *
 * Some defines are controlled by the Makefile, e.g.:
 *   WITH_PROFILER
 *   WITH_DEBUGGER
 *   WITH_INSTR_CHECKS
 *   WITH_TRACKREF_CHECKS
 *   EASY_GDB
 *   NDEBUG
 *
 * If THREADED_INTERP is not defined, we use a classic "while true / switch"
 * interpreter.  If it is defined, then the tail end of each instruction
 * handler fetches the next instruction and jumps directly to the handler.
 * This increases the size of the "Std" interpreter by about 10%, but
 * provides a speedup of about the same magnitude.
 *
 * There's a "hybrid" approach that uses a goto table instead of a switch
 * statement, avoiding the "is the opcode in range" tests required for switch.
 * The performance is close to the threaded version, and without the 10%
 * size increase, but the benchmark results are off enough that it's not
 * worth adding as a third option.
 */
#define THREADED_INTERP             /* threaded vs. while-loop interpreter */
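/*
 * Editorial sketch (not produced by gen-mterp.py): a minimal illustration of
 * the two dispatch styles described in the THREADED_INTERP comment above,
 * using a hypothetical two-opcode instruction set.  The handler names and
 * opcode values are invented for the example; the real handlers, opcode
 * numbering, and interpreter state appear later in this file.  The threaded
 * variant relies on the gcc computed-goto extension.
 */
#if 0   /* illustration only -- never compiled */
static int dispatchSwitchStyle(const u2* pc)
{
    /* classic "while true / switch": one central dispatch point */
    for (;;) {
        switch (*pc & 0xff) {
        case 0x00: /* nop */    pc++;  break;
        case 0x0e: /* return */ return 0;
        default:                return -1;
        }
    }
}

static int dispatchThreadedStyle(const u2* pc)
{
    /* threaded: the tail of each handler fetches the next instruction and
     * jumps directly to its handler */
    static void* handlers[0x100] = { [0x00] = &&op_nop, [0x0e] = &&op_return };
    goto *handlers[*pc & 0xff];

op_nop:
    pc++;
    goto *handlers[*pc & 0xff];     /* re-dispatch at the end of the handler */

op_return:
    return 0;
}
#endif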

#ifdef WITH_INSTR_CHECKS            /* instruction-level paranoia (slow!) */
# define CHECK_BRANCH_OFFSETS
# define CHECK_REGISTER_INDICES
#endif

/*
 * ARM EABI requires 64-bit alignment for access to 64-bit data types.  We
 * can't just use pointers to copy 64-bit values out of our interpreted
 * register set, because gcc will generate ldrd/strd.
 *
 * The __UNION version copies data in and out of a union.  The __MEMCPY
 * version uses a memcpy() call to do the transfer; gcc is smart enough to
 * not actually call memcpy().  The __UNION version is very bad on ARM;
 * it only uses one more instruction than __MEMCPY, but for some reason
 * gcc thinks it needs separate storage for every instance of the union.
 * On top of that, it feels the need to zero them out at the start of the
 * method.  Net result is we zero out ~700 bytes of stack space at the top
 * of the interpreter using ARM STM instructions.
 */
#if defined(__ARM_EABI__)
//# define NO_UNALIGN_64__UNION
# define NO_UNALIGN_64__MEMCPY
#endif

//#define LOG_INSTR                   /* verbose debugging */
/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */

/*
 * Keep a tally of accesses to fields.  Currently only works if full DEX
 * optimization is disabled.
 */
#ifdef PROFILE_FIELD_ACCESS
# define UPDATE_FIELD_GET(_field) { (_field)->gets++; }
# define UPDATE_FIELD_PUT(_field) { (_field)->puts++; }
#else
# define UPDATE_FIELD_GET(_field) ((void)0)
# define UPDATE_FIELD_PUT(_field) ((void)0)
#endif

/*
 * Export another copy of the PC on every instruction; this is largely
 * redundant with EXPORT_PC and the debugger code.  This value can be
 * compared against what we have stored on the stack with EXPORT_PC to
 * help ensure that we aren't missing any export calls.
 */
#if WITH_EXTRA_GC_CHECKS > 1
# define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
#else
# define EXPORT_EXTRA_PC()
#endif

/*
 * Adjust the program counter.  "_offset" is a signed int, in 16-bit units.
 *
 * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
 *
 * We don't advance the program counter until we finish an instruction or
 * branch, because we don't want to have to unroll the PC if there's an
 * exception.
 */
#ifdef CHECK_BRANCH_OFFSETS
# define ADJUST_PC(_offset) do {                                            \
        int myoff = _offset;        /* deref only once */                   \
        if (pc + myoff < curMethod->insns ||                                \
            pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
        {                                                                   \
            char* desc;                                                     \
            desc = dexProtoCopyMethodDescriptor(&curMethod->prototype);     \
            LOGE("Invalid branch %d at 0x%04x in %s.%s %s\n",               \
                myoff, (int) (pc - curMethod->insns),                       \
                curMethod->clazz->descriptor, curMethod->name, desc);       \
            free(desc);                                                     \
            dvmAbort();                                                     \
        }                                                                   \
        pc += myoff;                                                        \
        EXPORT_EXTRA_PC();                                                  \
    } while (false)
#else
# define ADJUST_PC(_offset) do {                                            \
        pc += _offset;                                                      \
        EXPORT_EXTRA_PC();                                                  \
    } while (false)
#endif

/*
 * If enabled, log instructions as we execute them.
 */
#ifdef LOG_INSTR
# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
# define ILOG(_level, ...) do {                                             \
        char debugStrBuf[128];                                              \
        snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__);            \
        if (curMethod != NULL)                                              \
            LOG(_level, LOG_TAG"i", "%-2d|%04x%s\n",                        \
                self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
        else                                                                \
            LOG(_level, LOG_TAG"i", "%-2d|####%s\n",                        \
                self->threadId, debugStrBuf);                               \
    } while(false)
void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
static const char kSpacing[] = " ";
#else
# define ILOGD(...) ((void)0)
# define ILOGV(...) ((void)0)
# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
#endif
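/*
 * Editorial note: with LOG_INSTR enabled, each handler's ILOGV() call is
 * rendered by ILOG() above as "<threadId>|<pc offset in 16-bit code units>"
 * followed by the handler-supplied text (which itself starts with "|"),
 * under the "dalvikvmi" log tag.  For example (values invented for
 * illustration), thread 1 executing an instruction at code-unit offset
 * 0x0004 would log roughly:
 *
 *     1 |0004|add-int v0,v1
 */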

/* get a long from an array of u4 */
static inline s8 getLongFromArray(const u4* ptr, int idx)
{
#if defined(NO_UNALIGN_64__UNION)
    union { s8 ll; u4 parts[2]; } conv;

    ptr += idx;
    conv.parts[0] = ptr[0];
    conv.parts[1] = ptr[1];
    return conv.ll;
#elif defined(NO_UNALIGN_64__MEMCPY)
    s8 val;
    memcpy(&val, &ptr[idx], 8);
    return val;
#else
    return *((s8*) &ptr[idx]);
#endif
}

/* store a long into an array of u4 */
static inline void putLongToArray(u4* ptr, int idx, s8 val)
{
#if defined(NO_UNALIGN_64__UNION)
    union { s8 ll; u4 parts[2]; } conv;

    ptr += idx;
    conv.ll = val;
    ptr[0] = conv.parts[0];
    ptr[1] = conv.parts[1];
#elif defined(NO_UNALIGN_64__MEMCPY)
    memcpy(&ptr[idx], &val, 8);
#else
    *((s8*) &ptr[idx]) = val;
#endif
}

/* get a double from an array of u4 */
static inline double getDoubleFromArray(const u4* ptr, int idx)
{
#if defined(NO_UNALIGN_64__UNION)
    union { double d; u4 parts[2]; } conv;

    ptr += idx;
    conv.parts[0] = ptr[0];
    conv.parts[1] = ptr[1];
    return conv.d;
#elif defined(NO_UNALIGN_64__MEMCPY)
    double dval;
    memcpy(&dval, &ptr[idx], 8);
    return dval;
#else
    return *((double*) &ptr[idx]);
#endif
}

/* store a double into an array of u4 */
static inline void putDoubleToArray(u4* ptr, int idx, double dval)
{
#if defined(NO_UNALIGN_64__UNION)
    union { double d; u4 parts[2]; } conv;

    ptr += idx;
    conv.d = dval;
    ptr[0] = conv.parts[0];
    ptr[1] = conv.parts[1];
#elif defined(NO_UNALIGN_64__MEMCPY)
    memcpy(&ptr[idx], &dval, 8);
#else
    *((double*) &ptr[idx]) = dval;
#endif
}
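/*
 * Editorial sketch (not produced by gen-mterp.py): the helpers above exist
 * because a Dalvik long or double occupies two adjacent u4 slots in the
 * interpreted register frame, and those slots are only guaranteed 32-bit
 * alignment.  A minimal round-trip through a fake four-register frame looks
 * like this; the frame and values are invented for the example.
 */
#if 0   /* illustration only -- never compiled */
static void wideRegisterRoundTrip(void)
{
    u4 fakeFrame[4] = { 0, 0, 0, 0 };   /* stand-in for "u4* fp" */

    /* write a 64-bit value into the register pair (v1, v2) */
    putLongToArray(fakeFrame, 1, 0x1122334455667788LL);

    /* read it back; the unchecked GET_REGISTER_WIDE(1) below expands to
     * exactly this call */
    s8 wide = getLongFromArray(fakeFrame, 1);
    assert(wide == 0x1122334455667788LL);
    assert(fakeFrame[0] == 0 && fakeFrame[3] == 0);   /* neighbors untouched */
}
#endif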

/*
 * If enabled, validate the register number on every access.  Otherwise,
 * just do an array access.
 *
 * Assumes the existence of "u4* fp".
 *
 * "_idx" may be referenced more than once.
 */
#ifdef CHECK_REGISTER_INDICES
# define GET_REGISTER(_idx) \
    ( (_idx) < curMethod->registersSize ? \
        (fp[(_idx)]) : (assert(!"bad reg"),1969) )
# define SET_REGISTER(_idx, _val) \
    ( (_idx) < curMethod->registersSize ? \
        (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
# define GET_REGISTER_AS_OBJECT(_idx)       ((Object *)GET_REGISTER(_idx))
# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
# define GET_REGISTER_WIDE(_idx) \
    ( (_idx) < curMethod->registersSize-1 ? \
        getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
# define SET_REGISTER_WIDE(_idx, _val) \
    ( (_idx) < curMethod->registersSize-1 ? \
        putLongToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969) )
# define GET_REGISTER_FLOAT(_idx) \
    ( (_idx) < curMethod->registersSize ? \
        (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
# define SET_REGISTER_FLOAT(_idx, _val) \
    ( (_idx) < curMethod->registersSize ? \
        (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
# define GET_REGISTER_DOUBLE(_idx) \
    ( (_idx) < curMethod->registersSize-1 ? \
        getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
# define SET_REGISTER_DOUBLE(_idx, _val) \
    ( (_idx) < curMethod->registersSize-1 ? \
        putDoubleToArray(fp, (_idx), (_val)) : (assert(!"bad reg"),1969.0) )
#else
# define GET_REGISTER(_idx)                 (fp[(_idx)])
# define SET_REGISTER(_idx, _val)           (fp[(_idx)] = (_val))
# define GET_REGISTER_AS_OBJECT(_idx)       ((Object*) fp[(_idx)])
# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
# define GET_REGISTER_INT(_idx)             ((s4)GET_REGISTER(_idx))
# define SET_REGISTER_INT(_idx, _val)       SET_REGISTER(_idx, (s4)_val)
# define GET_REGISTER_WIDE(_idx)            getLongFromArray(fp, (_idx))
# define SET_REGISTER_WIDE(_idx, _val)      putLongToArray(fp, (_idx), (_val))
# define GET_REGISTER_FLOAT(_idx)           (*((float*) &fp[(_idx)]))
# define SET_REGISTER_FLOAT(_idx, _val)     (*((float*) &fp[(_idx)]) = (_val))
# define GET_REGISTER_DOUBLE(_idx)          getDoubleFromArray(fp, (_idx))
# define SET_REGISTER_DOUBLE(_idx, _val)    putDoubleToArray(fp, (_idx), (_val))
#endif

/*
 * Get 16 bits from the specified offset of the program counter.  We always
 * want to load 16 bits at a time from the instruction stream -- it's more
 * efficient than 8 and won't have the alignment problems that 32 might.
 *
 * Assumes existence of "const u2* pc".
 */
#define FETCH(_offset)      (pc[(_offset)])

/*
 * Extract instruction byte from 16-bit fetch (_inst is a u2).
 */
#define INST_INST(_inst)    ((_inst) & 0xff)

/*
 * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
 */
#define INST_A(_inst)       (((_inst) >> 8) & 0x0f)
#define INST_B(_inst)       ((_inst) >> 12)

/*
 * Get the 8-bit "vAA" register index from the instruction word.
 * (_inst is u2)
 */
#define INST_AA(_inst)      ((_inst) >> 8)

/*
 * The current PC must be available to Throwable constructors, e.g.
 * those created by dvmThrowException(), so that the exception stack
 * trace can be generated correctly.  If we don't do this, the offset
 * within the current method won't be shown correctly.  See the notes
 * in Exception.c.
 *
 * This is also used to determine the address for precise GC.
 *
 * Assumes existence of "u4* fp" and "const u2* pc".
 */
#define EXPORT_PC()         (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)

/*
 * Determine if we need to switch to a different interpreter.  "_current"
 * is either INTERP_STD or INTERP_DBG.  It should be fixed for a given
 * interpreter generation file, which should remove the outer conditional
 * from the following.
 *
 * If we're building without debug and profiling support, we never switch.
 */
#if defined(WITH_PROFILER) || defined(WITH_DEBUGGER)
# define NEED_INTERP_SWITCH(_current) (                                     \
    (_current == INTERP_STD) ?                                              \
        dvmDebuggerOrProfilerActive() : !dvmDebuggerOrProfilerActive() )
#else
# define NEED_INTERP_SWITCH(_current) (false)
#endif

/*
 * Look up an interface on a class using the cache.
 */
INLINE Method* dvmFindInterfaceMethodInCache(ClassObject* thisClass,
    u4 methodIdx, const Method* method, DvmDex* methodClassDex)
{
#define ATOMIC_CACHE_CALC \
    dvmInterpFindInterfaceMethod(thisClass, methodIdx, method, methodClassDex)

    return (Method*) ATOMIC_CACHE_LOOKUP(methodClassDex->pInterfaceCache,
                DEX_INTERFACE_CACHE_SIZE, thisClass, methodIdx);

#undef ATOMIC_CACHE_CALC
}

/*
 * Check to see if "obj" is NULL.  If so, throw an exception.  Assumes the
 * pc has already been exported to the stack.
 *
 * Perform additional checks on debug builds.
 *
 * Use this to check for NULL when the instruction handler calls into
 * something that could throw an exception (so we have already called
 * EXPORT_PC at the top).
 */
static inline bool checkForNull(Object* obj)
{
    if (obj == NULL) {
        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
        return false;
    }
#ifdef WITH_EXTRA_OBJECT_VALIDATION
    if (!dvmIsValidObject(obj)) {
        LOGE("Invalid object %p\n", obj);
        dvmAbort();
    }
#endif
#ifndef NDEBUG
    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
        /* probable heap corruption */
        LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
        dvmAbort();
    }
#endif
    return true;
}

/*
 * Check to see if "obj" is NULL.  If so, export the PC into the stack
 * frame and throw an exception.
 *
 * Perform additional checks on debug builds.
 *
 * Use this to check for NULL when the instruction handler doesn't do
 * anything else that can throw an exception.
 */
static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
{
    if (obj == NULL) {
        EXPORT_PC();
        dvmThrowException("Ljava/lang/NullPointerException;", NULL);
        return false;
    }
#ifdef WITH_EXTRA_OBJECT_VALIDATION
    if (!dvmIsValidObject(obj)) {
        LOGE("Invalid object %p\n", obj);
        dvmAbort();
    }
#endif
#ifndef NDEBUG
    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
        /* probable heap corruption */
        LOGE("Invalid object class %p (in %p)\n", obj->clazz, obj);
        dvmAbort();
    }
#endif
    return true;
}


/* File: cstubs/stubdefs.c */
/* this is a standard (no debug support) interpreter */
#define INTERP_TYPE INTERP_STD
#define CHECK_DEBUG_AND_PROF() ((void)0)
#define CHECK_TRACKED_REFS() ((void)0)

/*
 * In the C mterp stubs, "goto" is a function call followed immediately
 * by a return.
 */

#define GOTO_TARGET_DECL(_target, ...) \
    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__);

#define GOTO_TARGET(_target, ...) \
    void dvmMterp_##_target(MterpGlue* glue, ## __VA_ARGS__) { \
        u2 ref, vsrc1, vsrc2, vdst; \
        u2 inst = FETCH(0); \
        const Method* methodToCall; \
        StackSaveArea* debugSaveArea;

#define GOTO_TARGET_END }

/*
 * Redefine what used to be local variable accesses into MterpGlue struct
 * references.  (These are undefined down in "footer.c".)
 */
#define retval                  glue->retval
#define pc                      glue->pc
#define fp                      glue->fp
#define curMethod               glue->method
#define methodClassDex          glue->methodClassDex
#define self                    glue->self
#define debugTrackedRefStart    glue->debugTrackedRefStart

/* ugh */
#define STUB_HACK(x) x


/*
 * Opcode handler framing macros.  Here, each opcode is a separate function
 * that takes a "glue" argument and returns void.  We can't declare
 * these "static" because they may be called from an assembly stub.
 */
#define HANDLE_OPCODE(_op) \
    void dvmMterp_##_op(MterpGlue* glue) { \
        u2 ref, vsrc1, vsrc2, vdst; \
        u2 inst = FETCH(0);

#define OP_END }

/*
 * Like the "portable" FINISH, but don't reload "inst", and return to caller
 * when done.
 */
#define FINISH(_offset) { \
        ADJUST_PC(_offset); \
        CHECK_DEBUG_AND_PROF(); \
        CHECK_TRACKED_REFS(); \
        return; \
    }

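/*
 * Editorial sketch (not produced by gen-mterp.py): what the framing macros
 * above turn a handler into.  For a hypothetical trivial one-code-unit
 * opcode written as
 *
 *     HANDLE_OPCODE(OP_EXAMPLE)
 *         FINISH(1);
 *     OP_END
 *
 * the preprocessor produces, approximately:
 *
 *     void dvmMterp_OP_EXAMPLE(MterpGlue* glue) {
 *         u2 ref, vsrc1, vsrc2, vdst;
 *         u2 inst = FETCH(0);        // "pc" is #defined to glue->pc above
 *         {
 *             ADJUST_PC(1);          // advance glue->pc by one code unit
 *             CHECK_DEBUG_AND_PROF();
 *             CHECK_TRACKED_REFS();
 *             return;                // "goto next op" becomes return-to-caller
 *         }
 *     }
 *
 * which is why FINISH() returns to its (assembly stub) caller here rather
 * than jumping to the next handler as the portable interpreter does.
 */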

/*
 * The "goto label" statements turn into function calls followed by
 * return statements.  Some of the functions take arguments, which in the
 * portable interpreter are handled by assigning values to globals.
 */

#define GOTO_exceptionThrown()                                              \
    do {                                                                    \
        dvmMterp_exceptionThrown(glue);                                     \
        return;                                                             \
    } while(false)

#define GOTO_returnFromMethod()                                             \
    do {                                                                    \
        dvmMterp_returnFromMethod(glue);                                    \
        return;                                                             \
    } while(false)

#define GOTO_invoke(_target, _methodCallRange)                              \
    do {                                                                    \
        dvmMterp_##_target(glue, _methodCallRange);                         \
        return;                                                             \
    } while(false)

#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst)   \
    do {                                                                    \
        dvmMterp_invokeMethod(glue, _methodCallRange, _methodToCall,        \
            _vsrc1, _vdst);                                                 \
        return;                                                             \
    } while(false)

/*
 * As a special case, "goto bail" turns into a longjmp.  Use "bail_switch"
 * if we need to switch to the other interpreter upon our return.
 */
#define GOTO_bail() \
    dvmMterpStdBail(glue, false);
#define GOTO_bail_switch() \
    dvmMterpStdBail(glue, true);

/*
 * Periodically check for thread suspension.
 *
 * While we're at it, see if a debugger has attached or the profiler has
 * started.  If so, switch to a different "goto" table.
 */
#define PERIODIC_CHECKS(_entryPoint, _pcadj) {                              \
        if (dvmCheckSuspendQuick(self)) {                                   \
            EXPORT_PC();  /* need for precise GC */                         \
            dvmCheckSuspendPending(self);                                   \
        }                                                                   \
        if (NEED_INTERP_SWITCH(INTERP_TYPE)) {                              \
            ADJUST_PC(_pcadj);                                              \
            glue->entryPoint = _entryPoint;                                 \
            LOGVV("threadid=%d: switch to STD ep=%d adj=%d\n",              \
                glue->self->threadId, (_entryPoint), (_pcadj));             \
            GOTO_bail_switch();                                             \
        }                                                                   \
    }


/* File: c/opcommon.c */
/* forward declarations of goto targets */
GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
    u2 count, u2 regs);
GOTO_TARGET_DECL(returnFromMethod);
GOTO_TARGET_DECL(exceptionThrown);

/*
 * ===========================================================================
 *
 * What follows are opcode definitions shared between multiple opcodes with
 * minor substitutions handled by the C pre-processor.  These should probably
 * use the mterp substitution mechanism instead, with the code here moved
 * into common fragment files (like the asm "binop.S"), although it's hard
 * to give up the C preprocessor in favor of the much simpler text subst.
571 * 572 * =========================================================================== 573 */ 574 575#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \ 576 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 577 vdst = INST_A(inst); \ 578 vsrc1 = INST_B(inst); \ 579 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \ 580 SET_REGISTER##_totype(vdst, \ 581 GET_REGISTER##_fromtype(vsrc1)); \ 582 FINISH(1); 583 584#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \ 585 _tovtype, _tortype) \ 586 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 587 { \ 588 /* spec defines specific handling for +/- inf and NaN values */ \ 589 _fromvtype val; \ 590 _tovtype intMin, intMax, result; \ 591 vdst = INST_A(inst); \ 592 vsrc1 = INST_B(inst); \ 593 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \ 594 val = GET_REGISTER##_fromrtype(vsrc1); \ 595 intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1); \ 596 intMax = ~intMin; \ 597 result = (_tovtype) val; \ 598 if (val >= intMax) /* +inf */ \ 599 result = intMax; \ 600 else if (val <= intMin) /* -inf */ \ 601 result = intMin; \ 602 else if (val != val) /* NaN */ \ 603 result = 0; \ 604 else \ 605 result = (_tovtype) val; \ 606 SET_REGISTER##_tortype(vdst, result); \ 607 } \ 608 FINISH(1); 609 610#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \ 611 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 612 vdst = INST_A(inst); \ 613 vsrc1 = INST_B(inst); \ 614 ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \ 615 SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \ 616 FINISH(1); 617 618/* NOTE: the comparison result is always a signed 4-byte integer */ 619#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \ 620 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 621 { \ 622 int result; \ 623 u2 regs; \ 624 _varType val1, val2; \ 625 vdst = INST_AA(inst); \ 626 regs = FETCH(1); \ 627 vsrc1 = regs & 0xff; \ 628 vsrc2 = regs >> 8; \ 629 ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 630 val1 = GET_REGISTER##_type(vsrc1); \ 631 val2 = GET_REGISTER##_type(vsrc2); \ 632 if (val1 == val2) \ 633 result = 0; \ 634 else if (val1 < val2) \ 635 result = -1; \ 636 else if (val1 > val2) \ 637 result = 1; \ 638 else \ 639 result = (_nanVal); \ 640 ILOGV("+ result=%d\n", result); \ 641 SET_REGISTER(vdst, result); \ 642 } \ 643 FINISH(2); 644 645#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \ 646 HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \ 647 vsrc1 = INST_A(inst); \ 648 vsrc2 = INST_B(inst); \ 649 if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \ 650 int branchOffset = (s2)FETCH(1); /* sign-extended */ \ 651 ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \ 652 branchOffset); \ 653 ILOGV("> branch taken"); \ 654 if (branchOffset < 0) \ 655 PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \ 656 FINISH(branchOffset); \ 657 } else { \ 658 ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \ 659 FINISH(2); \ 660 } 661 662#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \ 663 HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \ 664 vsrc1 = INST_AA(inst); \ 665 if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \ 666 int branchOffset = (s2)FETCH(1); /* sign-extended */ \ 667 ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \ 668 ILOGV("> branch taken"); \ 669 if (branchOffset < 0) \ 670 PERIODIC_CHECKS(kInterpEntryInstr, branchOffset); \ 671 FINISH(branchOffset); \ 672 } else { \ 673 ILOGV("|if-%s v%d,-", (_opname), vsrc1); \ 674 FINISH(2); \ 675 } 676 677#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \ 678 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 679 
vdst = INST_A(inst); \ 680 vsrc1 = INST_B(inst); \ 681 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \ 682 SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \ 683 FINISH(1); 684 685#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \ 686 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 687 { \ 688 u2 srcRegs; \ 689 vdst = INST_AA(inst); \ 690 srcRegs = FETCH(1); \ 691 vsrc1 = srcRegs & 0xff; \ 692 vsrc2 = srcRegs >> 8; \ 693 ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \ 694 if (_chkdiv != 0) { \ 695 s4 firstVal, secondVal, result; \ 696 firstVal = GET_REGISTER(vsrc1); \ 697 secondVal = GET_REGISTER(vsrc2); \ 698 if (secondVal == 0) { \ 699 EXPORT_PC(); \ 700 dvmThrowException("Ljava/lang/ArithmeticException;", \ 701 "divide by zero"); \ 702 GOTO_exceptionThrown(); \ 703 } \ 704 if ((u4)firstVal == 0x80000000 && secondVal == -1) { \ 705 if (_chkdiv == 1) \ 706 result = firstVal; /* division */ \ 707 else \ 708 result = 0; /* remainder */ \ 709 } else { \ 710 result = firstVal _op secondVal; \ 711 } \ 712 SET_REGISTER(vdst, result); \ 713 } else { \ 714 /* non-div/rem case */ \ 715 SET_REGISTER(vdst, \ 716 (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \ 717 } \ 718 } \ 719 FINISH(2); 720 721#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \ 722 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 723 { \ 724 u2 srcRegs; \ 725 vdst = INST_AA(inst); \ 726 srcRegs = FETCH(1); \ 727 vsrc1 = srcRegs & 0xff; \ 728 vsrc2 = srcRegs >> 8; \ 729 ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \ 730 SET_REGISTER(vdst, \ 731 _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \ 732 } \ 733 FINISH(2); 734 735#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \ 736 HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \ 737 vdst = INST_A(inst); \ 738 vsrc1 = INST_B(inst); \ 739 vsrc2 = FETCH(1); \ 740 ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \ 741 (_opname), vdst, vsrc1, vsrc2); \ 742 if (_chkdiv != 0) { \ 743 s4 firstVal, result; \ 744 firstVal = GET_REGISTER(vsrc1); \ 745 if ((s2) vsrc2 == 0) { \ 746 EXPORT_PC(); \ 747 dvmThrowException("Ljava/lang/ArithmeticException;", \ 748 "divide by zero"); \ 749 GOTO_exceptionThrown(); \ 750 } \ 751 if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \ 752 /* won't generate /lit16 instr for this; check anyway */ \ 753 if (_chkdiv == 1) \ 754 result = firstVal; /* division */ \ 755 else \ 756 result = 0; /* remainder */ \ 757 } else { \ 758 result = firstVal _op (s2) vsrc2; \ 759 } \ 760 SET_REGISTER(vdst, result); \ 761 } else { \ 762 /* non-div/rem case */ \ 763 SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \ 764 } \ 765 FINISH(2); 766 767#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \ 768 HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \ 769 { \ 770 u2 litInfo; \ 771 vdst = INST_AA(inst); \ 772 litInfo = FETCH(1); \ 773 vsrc1 = litInfo & 0xff; \ 774 vsrc2 = litInfo >> 8; /* constant */ \ 775 ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \ 776 (_opname), vdst, vsrc1, vsrc2); \ 777 if (_chkdiv != 0) { \ 778 s4 firstVal, result; \ 779 firstVal = GET_REGISTER(vsrc1); \ 780 if ((s1) vsrc2 == 0) { \ 781 EXPORT_PC(); \ 782 dvmThrowException("Ljava/lang/ArithmeticException;", \ 783 "divide by zero"); \ 784 GOTO_exceptionThrown(); \ 785 } \ 786 if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \ 787 if (_chkdiv == 1) \ 788 result = firstVal; /* division */ \ 789 else \ 790 result = 0; /* remainder */ \ 791 } else { \ 792 result = firstVal _op ((s1) vsrc2); \ 793 } \ 794 SET_REGISTER(vdst, result); \ 795 } else { \ 
796 SET_REGISTER(vdst, \ 797 (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \ 798 } \ 799 } \ 800 FINISH(2); 801 802#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \ 803 HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \ 804 { \ 805 u2 litInfo; \ 806 vdst = INST_AA(inst); \ 807 litInfo = FETCH(1); \ 808 vsrc1 = litInfo & 0xff; \ 809 vsrc2 = litInfo >> 8; /* constant */ \ 810 ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \ 811 (_opname), vdst, vsrc1, vsrc2); \ 812 SET_REGISTER(vdst, \ 813 _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \ 814 } \ 815 FINISH(2); 816 817#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \ 818 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 819 vdst = INST_A(inst); \ 820 vsrc1 = INST_B(inst); \ 821 ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 822 if (_chkdiv != 0) { \ 823 s4 firstVal, secondVal, result; \ 824 firstVal = GET_REGISTER(vdst); \ 825 secondVal = GET_REGISTER(vsrc1); \ 826 if (secondVal == 0) { \ 827 EXPORT_PC(); \ 828 dvmThrowException("Ljava/lang/ArithmeticException;", \ 829 "divide by zero"); \ 830 GOTO_exceptionThrown(); \ 831 } \ 832 if ((u4)firstVal == 0x80000000 && secondVal == -1) { \ 833 if (_chkdiv == 1) \ 834 result = firstVal; /* division */ \ 835 else \ 836 result = 0; /* remainder */ \ 837 } else { \ 838 result = firstVal _op secondVal; \ 839 } \ 840 SET_REGISTER(vdst, result); \ 841 } else { \ 842 SET_REGISTER(vdst, \ 843 (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \ 844 } \ 845 FINISH(1); 846 847#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \ 848 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 849 vdst = INST_A(inst); \ 850 vsrc1 = INST_B(inst); \ 851 ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 852 SET_REGISTER(vdst, \ 853 _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \ 854 FINISH(1); 855 856#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \ 857 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 858 { \ 859 u2 srcRegs; \ 860 vdst = INST_AA(inst); \ 861 srcRegs = FETCH(1); \ 862 vsrc1 = srcRegs & 0xff; \ 863 vsrc2 = srcRegs >> 8; \ 864 ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 865 if (_chkdiv != 0) { \ 866 s8 firstVal, secondVal, result; \ 867 firstVal = GET_REGISTER_WIDE(vsrc1); \ 868 secondVal = GET_REGISTER_WIDE(vsrc2); \ 869 if (secondVal == 0LL) { \ 870 EXPORT_PC(); \ 871 dvmThrowException("Ljava/lang/ArithmeticException;", \ 872 "divide by zero"); \ 873 GOTO_exceptionThrown(); \ 874 } \ 875 if ((u8)firstVal == 0x8000000000000000ULL && \ 876 secondVal == -1LL) \ 877 { \ 878 if (_chkdiv == 1) \ 879 result = firstVal; /* division */ \ 880 else \ 881 result = 0; /* remainder */ \ 882 } else { \ 883 result = firstVal _op secondVal; \ 884 } \ 885 SET_REGISTER_WIDE(vdst, result); \ 886 } else { \ 887 SET_REGISTER_WIDE(vdst, \ 888 (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \ 889 } \ 890 } \ 891 FINISH(2); 892 893#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \ 894 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 895 { \ 896 u2 srcRegs; \ 897 vdst = INST_AA(inst); \ 898 srcRegs = FETCH(1); \ 899 vsrc1 = srcRegs & 0xff; \ 900 vsrc2 = srcRegs >> 8; \ 901 ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 902 SET_REGISTER_WIDE(vdst, \ 903 _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \ 904 } \ 905 FINISH(2); 906 907#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \ 908 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 909 vdst = INST_A(inst); \ 910 vsrc1 = INST_B(inst); \ 911 ILOGV("|%s-long-2addr v%d,v%d", 
(_opname), vdst, vsrc1); \ 912 if (_chkdiv != 0) { \ 913 s8 firstVal, secondVal, result; \ 914 firstVal = GET_REGISTER_WIDE(vdst); \ 915 secondVal = GET_REGISTER_WIDE(vsrc1); \ 916 if (secondVal == 0LL) { \ 917 EXPORT_PC(); \ 918 dvmThrowException("Ljava/lang/ArithmeticException;", \ 919 "divide by zero"); \ 920 GOTO_exceptionThrown(); \ 921 } \ 922 if ((u8)firstVal == 0x8000000000000000ULL && \ 923 secondVal == -1LL) \ 924 { \ 925 if (_chkdiv == 1) \ 926 result = firstVal; /* division */ \ 927 else \ 928 result = 0; /* remainder */ \ 929 } else { \ 930 result = firstVal _op secondVal; \ 931 } \ 932 SET_REGISTER_WIDE(vdst, result); \ 933 } else { \ 934 SET_REGISTER_WIDE(vdst, \ 935 (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\ 936 } \ 937 FINISH(1); 938 939#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \ 940 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 941 vdst = INST_A(inst); \ 942 vsrc1 = INST_B(inst); \ 943 ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 944 SET_REGISTER_WIDE(vdst, \ 945 _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \ 946 FINISH(1); 947 948#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \ 949 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 950 { \ 951 u2 srcRegs; \ 952 vdst = INST_AA(inst); \ 953 srcRegs = FETCH(1); \ 954 vsrc1 = srcRegs & 0xff; \ 955 vsrc2 = srcRegs >> 8; \ 956 ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 957 SET_REGISTER_FLOAT(vdst, \ 958 GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \ 959 } \ 960 FINISH(2); 961 962#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \ 963 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 964 { \ 965 u2 srcRegs; \ 966 vdst = INST_AA(inst); \ 967 srcRegs = FETCH(1); \ 968 vsrc1 = srcRegs & 0xff; \ 969 vsrc2 = srcRegs >> 8; \ 970 ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 971 SET_REGISTER_DOUBLE(vdst, \ 972 GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \ 973 } \ 974 FINISH(2); 975 976#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \ 977 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 978 vdst = INST_A(inst); \ 979 vsrc1 = INST_B(inst); \ 980 ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 981 SET_REGISTER_FLOAT(vdst, \ 982 GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \ 983 FINISH(1); 984 985#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \ 986 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 987 vdst = INST_A(inst); \ 988 vsrc1 = INST_B(inst); \ 989 ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 990 SET_REGISTER_DOUBLE(vdst, \ 991 GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \ 992 FINISH(1); 993 994#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \ 995 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 996 { \ 997 ArrayObject* arrayObj; \ 998 u2 arrayInfo; \ 999 EXPORT_PC(); \ 1000 vdst = INST_AA(inst); \ 1001 arrayInfo = FETCH(1); \ 1002 vsrc1 = arrayInfo & 0xff; /* array ptr */ \ 1003 vsrc2 = arrayInfo >> 8; /* index */ \ 1004 ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 1005 arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \ 1006 if (!checkForNull((Object*) arrayObj)) \ 1007 GOTO_exceptionThrown(); \ 1008 if (GET_REGISTER(vsrc2) >= arrayObj->length) { \ 1009 LOGV("Invalid array access: %p %d (len=%d)\n", \ 1010 arrayObj, vsrc2, arrayObj->length); \ 1011 dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \ 1012 NULL); \ 1013 GOTO_exceptionThrown(); \ 1014 } \ 1015 SET_REGISTER##_regsize(vdst, \ 1016 ((_type*) 
arrayObj->contents)[GET_REGISTER(vsrc2)]); \ 1017 ILOGV("+ AGET[%d]=0x%x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \ 1018 } \ 1019 FINISH(2); 1020 1021#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \ 1022 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 1023 { \ 1024 ArrayObject* arrayObj; \ 1025 u2 arrayInfo; \ 1026 EXPORT_PC(); \ 1027 vdst = INST_AA(inst); /* AA: source value */ \ 1028 arrayInfo = FETCH(1); \ 1029 vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \ 1030 vsrc2 = arrayInfo >> 8; /* CC: index */ \ 1031 ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 1032 arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \ 1033 if (!checkForNull((Object*) arrayObj)) \ 1034 GOTO_exceptionThrown(); \ 1035 if (GET_REGISTER(vsrc2) >= arrayObj->length) { \ 1036 dvmThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", \ 1037 NULL); \ 1038 GOTO_exceptionThrown(); \ 1039 } \ 1040 ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\ 1041 ((_type*) arrayObj->contents)[GET_REGISTER(vsrc2)] = \ 1042 GET_REGISTER##_regsize(vdst); \ 1043 } \ 1044 FINISH(2); 1045 1046/* 1047 * It's possible to get a bad value out of a field with sub-32-bit stores 1048 * because the -quick versions always operate on 32 bits. Consider: 1049 * short foo = -1 (sets a 32-bit register to 0xffffffff) 1050 * iput-quick foo (writes all 32 bits to the field) 1051 * short bar = 1 (sets a 32-bit register to 0x00000001) 1052 * iput-short (writes the low 16 bits to the field) 1053 * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001) 1054 * This can only happen when optimized and non-optimized code has interleaved 1055 * access to the same field. This is unlikely but possible. 1056 * 1057 * The easiest way to fix this is to always read/write 32 bits at a time. On 1058 * a device with a 16-bit data bus this is sub-optimal. (The alternative 1059 * approach is to have sub-int versions of iget-quick, but now we're wasting 1060 * Dalvik instruction space and making it less likely that handler code will 1061 * already be in the CPU i-cache.) 
1062 */ 1063#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \ 1064 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \ 1065 { \ 1066 InstField* ifield; \ 1067 Object* obj; \ 1068 EXPORT_PC(); \ 1069 vdst = INST_A(inst); \ 1070 vsrc1 = INST_B(inst); /* object ptr */ \ 1071 ref = FETCH(1); /* field ref */ \ 1072 ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \ 1073 obj = (Object*) GET_REGISTER(vsrc1); \ 1074 if (!checkForNull(obj)) \ 1075 GOTO_exceptionThrown(); \ 1076 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \ 1077 if (ifield == NULL) { \ 1078 ifield = dvmResolveInstField(curMethod->clazz, ref); \ 1079 if (ifield == NULL) \ 1080 GOTO_exceptionThrown(); \ 1081 } \ 1082 SET_REGISTER##_regsize(vdst, \ 1083 dvmGetField##_ftype(obj, ifield->byteOffset)); \ 1084 ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \ 1085 (u8) GET_REGISTER##_regsize(vdst)); \ 1086 UPDATE_FIELD_GET(&ifield->field); \ 1087 } \ 1088 FINISH(2); 1089 1090#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \ 1091 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \ 1092 { \ 1093 Object* obj; \ 1094 vdst = INST_A(inst); \ 1095 vsrc1 = INST_B(inst); /* object ptr */ \ 1096 ref = FETCH(1); /* field offset */ \ 1097 ILOGV("|iget%s-quick v%d,v%d,field@+%u", \ 1098 (_opname), vdst, vsrc1, ref); \ 1099 obj = (Object*) GET_REGISTER(vsrc1); \ 1100 if (!checkForNullExportPC(obj, fp, pc)) \ 1101 GOTO_exceptionThrown(); \ 1102 SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \ 1103 ILOGV("+ IGETQ %d=0x%08llx", ref, \ 1104 (u8) GET_REGISTER##_regsize(vdst)); \ 1105 } \ 1106 FINISH(2); 1107 1108#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \ 1109 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \ 1110 { \ 1111 InstField* ifield; \ 1112 Object* obj; \ 1113 EXPORT_PC(); \ 1114 vdst = INST_A(inst); \ 1115 vsrc1 = INST_B(inst); /* object ptr */ \ 1116 ref = FETCH(1); /* field ref */ \ 1117 ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \ 1118 obj = (Object*) GET_REGISTER(vsrc1); \ 1119 if (!checkForNull(obj)) \ 1120 GOTO_exceptionThrown(); \ 1121 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \ 1122 if (ifield == NULL) { \ 1123 ifield = dvmResolveInstField(curMethod->clazz, ref); \ 1124 if (ifield == NULL) \ 1125 GOTO_exceptionThrown(); \ 1126 } \ 1127 dvmSetField##_ftype(obj, ifield->byteOffset, \ 1128 GET_REGISTER##_regsize(vdst)); \ 1129 ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \ 1130 (u8) GET_REGISTER##_regsize(vdst)); \ 1131 UPDATE_FIELD_PUT(&ifield->field); \ 1132 } \ 1133 FINISH(2); 1134 1135#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \ 1136 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \ 1137 { \ 1138 Object* obj; \ 1139 vdst = INST_A(inst); \ 1140 vsrc1 = INST_B(inst); /* object ptr */ \ 1141 ref = FETCH(1); /* field offset */ \ 1142 ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \ 1143 (_opname), vdst, vsrc1, ref); \ 1144 obj = (Object*) GET_REGISTER(vsrc1); \ 1145 if (!checkForNullExportPC(obj, fp, pc)) \ 1146 GOTO_exceptionThrown(); \ 1147 dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \ 1148 ILOGV("+ IPUTQ %d=0x%08llx", ref, \ 1149 (u8) GET_REGISTER##_regsize(vdst)); \ 1150 } \ 1151 FINISH(2); 1152 1153#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \ 1154 HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \ 1155 { \ 1156 StaticField* sfield; \ 1157 vdst = INST_AA(inst); \ 1158 ref = FETCH(1); /* field ref */ \ 1159 ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, 
ref); \ 1160 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \ 1161 if (sfield == NULL) { \ 1162 EXPORT_PC(); \ 1163 sfield = dvmResolveStaticField(curMethod->clazz, ref); \ 1164 if (sfield == NULL) \ 1165 GOTO_exceptionThrown(); \ 1166 } \ 1167 SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \ 1168 ILOGV("+ SGET '%s'=0x%08llx", \ 1169 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \ 1170 UPDATE_FIELD_GET(&sfield->field); \ 1171 } \ 1172 FINISH(2); 1173 1174#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \ 1175 HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \ 1176 { \ 1177 StaticField* sfield; \ 1178 vdst = INST_AA(inst); \ 1179 ref = FETCH(1); /* field ref */ \ 1180 ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \ 1181 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \ 1182 if (sfield == NULL) { \ 1183 EXPORT_PC(); \ 1184 sfield = dvmResolveStaticField(curMethod->clazz, ref); \ 1185 if (sfield == NULL) \ 1186 GOTO_exceptionThrown(); \ 1187 } \ 1188 dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \ 1189 ILOGV("+ SPUT '%s'=0x%08llx", \ 1190 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \ 1191 UPDATE_FIELD_PUT(&sfield->field); \ 1192 } \ 1193 FINISH(2); 1194 1195 1196/* File: c/OP_THROW_VERIFICATION_ERROR.c */ 1197HANDLE_OPCODE(OP_THROW_VERIFICATION_ERROR) 1198 EXPORT_PC(); 1199 vsrc1 = INST_AA(inst); 1200 ref = FETCH(1); /* class/field/method ref */ 1201 dvmThrowVerificationError(curMethod, vsrc1, ref); 1202 GOTO_exceptionThrown(); 1203OP_END 1204 1205/* File: c/gotoTargets.c */ 1206/* 1207 * C footer. This has some common code shared by the various targets. 1208 */ 1209 1210/* 1211 * Everything from here on is a "goto target". In the basic interpreter 1212 * we jump into these targets and then jump directly to the handler for 1213 * next instruction. Here, these are subroutines that return to the caller. 1214 */ 1215 1216GOTO_TARGET(filledNewArray, bool methodCallRange) 1217 { 1218 ClassObject* arrayClass; 1219 ArrayObject* newArray; 1220 u4* contents; 1221 char typeCh; 1222 int i; 1223 u4 arg5; 1224 1225 EXPORT_PC(); 1226 1227 ref = FETCH(1); /* class ref */ 1228 vdst = FETCH(2); /* first 4 regs -or- range base */ 1229 1230 if (methodCallRange) { 1231 vsrc1 = INST_AA(inst); /* #of elements */ 1232 arg5 = -1; /* silence compiler warning */ 1233 ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}", 1234 vsrc1, ref, vdst, vdst+vsrc1-1); 1235 } else { 1236 arg5 = INST_A(inst); 1237 vsrc1 = INST_B(inst); /* #of elements */ 1238 ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}", 1239 vsrc1, ref, vdst, arg5); 1240 } 1241 1242 /* 1243 * Resolve the array class. 1244 */ 1245 arrayClass = dvmDexGetResolvedClass(methodClassDex, ref); 1246 if (arrayClass == NULL) { 1247 arrayClass = dvmResolveClass(curMethod->clazz, ref, false); 1248 if (arrayClass == NULL) 1249 GOTO_exceptionThrown(); 1250 } 1251 /* 1252 if (!dvmIsArrayClass(arrayClass)) { 1253 dvmThrowException("Ljava/lang/RuntimeError;", 1254 "filled-new-array needs array class"); 1255 GOTO_exceptionThrown(); 1256 } 1257 */ 1258 /* verifier guarantees this is an array class */ 1259 assert(dvmIsArrayClass(arrayClass)); 1260 assert(dvmIsClassInitialized(arrayClass)); 1261 1262 /* 1263 * Create an array of the specified type. 
1264 */ 1265 LOGVV("+++ filled-new-array type is '%s'\n", arrayClass->descriptor); 1266 typeCh = arrayClass->descriptor[1]; 1267 if (typeCh == 'D' || typeCh == 'J') { 1268 /* category 2 primitives not allowed */ 1269 dvmThrowException("Ljava/lang/RuntimeError;", 1270 "bad filled array req"); 1271 GOTO_exceptionThrown(); 1272 } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') { 1273 /* TODO: requires multiple "fill in" loops with different widths */ 1274 LOGE("non-int primitives not implemented\n"); 1275 dvmThrowException("Ljava/lang/InternalError;", 1276 "filled-new-array not implemented for anything but 'int'"); 1277 GOTO_exceptionThrown(); 1278 } 1279 1280 newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK); 1281 if (newArray == NULL) 1282 GOTO_exceptionThrown(); 1283 1284 /* 1285 * Fill in the elements. It's legal for vsrc1 to be zero. 1286 */ 1287 contents = (u4*) newArray->contents; 1288 if (methodCallRange) { 1289 for (i = 0; i < vsrc1; i++) 1290 contents[i] = GET_REGISTER(vdst+i); 1291 } else { 1292 assert(vsrc1 <= 5); 1293 if (vsrc1 == 5) { 1294 contents[4] = GET_REGISTER(arg5); 1295 vsrc1--; 1296 } 1297 for (i = 0; i < vsrc1; i++) { 1298 contents[i] = GET_REGISTER(vdst & 0x0f); 1299 vdst >>= 4; 1300 } 1301 } 1302 1303 retval.l = newArray; 1304 } 1305 FINISH(3); 1306GOTO_TARGET_END 1307 1308 1309GOTO_TARGET(invokeVirtual, bool methodCallRange) 1310 { 1311 Method* baseMethod; 1312 Object* thisPtr; 1313 1314 EXPORT_PC(); 1315 1316 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */ 1317 ref = FETCH(1); /* method ref */ 1318 vdst = FETCH(2); /* 4 regs -or- first reg */ 1319 1320 /* 1321 * The object against which we are executing a method is always 1322 * in the first argument. 1323 */ 1324 if (methodCallRange) { 1325 assert(vsrc1 > 0); 1326 ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}", 1327 vsrc1, ref, vdst, vdst+vsrc1-1); 1328 thisPtr = (Object*) GET_REGISTER(vdst); 1329 } else { 1330 assert((vsrc1>>4) > 0); 1331 ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}", 1332 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f); 1333 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f); 1334 } 1335 1336 if (!checkForNull(thisPtr)) 1337 GOTO_exceptionThrown(); 1338 1339 /* 1340 * Resolve the method. This is the correct method for the static 1341 * type of the object. We also verify access permissions here. 1342 */ 1343 baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref); 1344 if (baseMethod == NULL) { 1345 baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL); 1346 if (baseMethod == NULL) { 1347 ILOGV("+ unknown method or access denied\n"); 1348 GOTO_exceptionThrown(); 1349 } 1350 } 1351 1352 /* 1353 * Combine the object we found with the vtable offset in the 1354 * method. 1355 */ 1356 assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount); 1357 methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex]; 1358 1359#if 0 1360 if (dvmIsAbstractMethod(methodToCall)) { 1361 /* 1362 * This can happen if you create two classes, Base and Sub, where 1363 * Sub is a sub-class of Base. Declare a protected abstract 1364 * method foo() in Base, and invoke foo() from a method in Base. 1365 * Base is an "abstract base class" and is never instantiated 1366 * directly. Now, Override foo() in Sub, and use Sub. This 1367 * Works fine unless Sub stops providing an implementation of 1368 * the method. 
1369 */ 1370 dvmThrowException("Ljava/lang/AbstractMethodError;", 1371 "abstract method not implemented"); 1372 GOTO_exceptionThrown(); 1373 } 1374#else 1375 assert(!dvmIsAbstractMethod(methodToCall) || 1376 methodToCall->nativeFunc != NULL); 1377#endif 1378 1379 LOGVV("+++ base=%s.%s virtual[%d]=%s.%s\n", 1380 baseMethod->clazz->descriptor, baseMethod->name, 1381 (u4) baseMethod->methodIndex, 1382 methodToCall->clazz->descriptor, methodToCall->name); 1383 assert(methodToCall != NULL); 1384 1385#if 0 1386 if (vsrc1 != methodToCall->insSize) { 1387 LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s\n", 1388 baseMethod->clazz->descriptor, baseMethod->name, 1389 (u4) baseMethod->methodIndex, 1390 methodToCall->clazz->descriptor, methodToCall->name); 1391 //dvmDumpClass(baseMethod->clazz); 1392 //dvmDumpClass(methodToCall->clazz); 1393 dvmDumpAllClasses(0); 1394 } 1395#endif 1396 1397 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst); 1398 } 1399GOTO_TARGET_END 1400 1401GOTO_TARGET(invokeSuper, bool methodCallRange) 1402 { 1403 Method* baseMethod; 1404 u2 thisReg; 1405 1406 EXPORT_PC(); 1407 1408 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */ 1409 ref = FETCH(1); /* method ref */ 1410 vdst = FETCH(2); /* 4 regs -or- first reg */ 1411 1412 if (methodCallRange) { 1413 ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}", 1414 vsrc1, ref, vdst, vdst+vsrc1-1); 1415 thisReg = vdst; 1416 } else { 1417 ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}", 1418 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f); 1419 thisReg = vdst & 0x0f; 1420 } 1421 /* impossible in well-formed code, but we must check nevertheless */ 1422 if (!checkForNull((Object*) GET_REGISTER(thisReg))) 1423 GOTO_exceptionThrown(); 1424 1425 /* 1426 * Resolve the method. This is the correct method for the static 1427 * type of the object. We also verify access permissions here. 1428 * The first arg to dvmResolveMethod() is just the referring class 1429 * (used for class loaders and such), so we don't want to pass 1430 * the superclass into the resolution call. 1431 */ 1432 baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref); 1433 if (baseMethod == NULL) { 1434 baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL); 1435 if (baseMethod == NULL) { 1436 ILOGV("+ unknown method or access denied\n"); 1437 GOTO_exceptionThrown(); 1438 } 1439 } 1440 1441 /* 1442 * Combine the object we found with the vtable offset in the 1443 * method's class. 1444 * 1445 * We're using the current method's class' superclass, not the 1446 * superclass of "this". This is because we might be executing 1447 * in a method inherited from a superclass, and we want to run 1448 * in that class' superclass. 1449 */ 1450 if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) { 1451 /* 1452 * Method does not exist in the superclass. Could happen if 1453 * superclass gets updated. 
1454 */ 1455 dvmThrowException("Ljava/lang/NoSuchMethodError;", 1456 baseMethod->name); 1457 GOTO_exceptionThrown(); 1458 } 1459 methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex]; 1460#if 0 1461 if (dvmIsAbstractMethod(methodToCall)) { 1462 dvmThrowException("Ljava/lang/AbstractMethodError;", 1463 "abstract method not implemented"); 1464 GOTO_exceptionThrown(); 1465 } 1466#else 1467 assert(!dvmIsAbstractMethod(methodToCall) || 1468 methodToCall->nativeFunc != NULL); 1469#endif 1470 LOGVV("+++ base=%s.%s super-virtual=%s.%s\n", 1471 baseMethod->clazz->descriptor, baseMethod->name, 1472 methodToCall->clazz->descriptor, methodToCall->name); 1473 assert(methodToCall != NULL); 1474 1475 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst); 1476 } 1477GOTO_TARGET_END 1478 1479GOTO_TARGET(invokeInterface, bool methodCallRange) 1480 { 1481 Object* thisPtr; 1482 ClassObject* thisClass; 1483 1484 EXPORT_PC(); 1485 1486 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */ 1487 ref = FETCH(1); /* method ref */ 1488 vdst = FETCH(2); /* 4 regs -or- first reg */ 1489 1490 /* 1491 * The object against which we are executing a method is always 1492 * in the first argument. 1493 */ 1494 if (methodCallRange) { 1495 assert(vsrc1 > 0); 1496 ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}", 1497 vsrc1, ref, vdst, vdst+vsrc1-1); 1498 thisPtr = (Object*) GET_REGISTER(vdst); 1499 } else { 1500 assert((vsrc1>>4) > 0); 1501 ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}", 1502 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f); 1503 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f); 1504 } 1505 if (!checkForNull(thisPtr)) 1506 GOTO_exceptionThrown(); 1507 1508 thisClass = thisPtr->clazz; 1509 1510 /* 1511 * Given a class and a method index, find the Method* with the 1512 * actual code we want to execute. 
1513 */ 1514 methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod, 1515 methodClassDex); 1516 if (methodToCall == NULL) { 1517 assert(dvmCheckException(self)); 1518 GOTO_exceptionThrown(); 1519 } 1520 1521 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst); 1522 } 1523GOTO_TARGET_END 1524 1525GOTO_TARGET(invokeDirect, bool methodCallRange) 1526 { 1527 u2 thisReg; 1528 1529 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */ 1530 ref = FETCH(1); /* method ref */ 1531 vdst = FETCH(2); /* 4 regs -or- first reg */ 1532 1533 EXPORT_PC(); 1534 1535 if (methodCallRange) { 1536 ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}", 1537 vsrc1, ref, vdst, vdst+vsrc1-1); 1538 thisReg = vdst; 1539 } else { 1540 ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}", 1541 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f); 1542 thisReg = vdst & 0x0f; 1543 } 1544 if (!checkForNull((Object*) GET_REGISTER(thisReg))) 1545 GOTO_exceptionThrown(); 1546 1547 methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref); 1548 if (methodToCall == NULL) { 1549 methodToCall = dvmResolveMethod(curMethod->clazz, ref, 1550 METHOD_DIRECT); 1551 if (methodToCall == NULL) { 1552 ILOGV("+ unknown direct method\n"); // should be impossible 1553 GOTO_exceptionThrown(); 1554 } 1555 } 1556 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst); 1557 } 1558GOTO_TARGET_END 1559 1560GOTO_TARGET(invokeStatic, bool methodCallRange) 1561 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */ 1562 ref = FETCH(1); /* method ref */ 1563 vdst = FETCH(2); /* 4 regs -or- first reg */ 1564 1565 EXPORT_PC(); 1566 1567 if (methodCallRange) 1568 ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}", 1569 vsrc1, ref, vdst, vdst+vsrc1-1); 1570 else 1571 ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}", 1572 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f); 1573 1574 methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref); 1575 if (methodToCall == NULL) { 1576 methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC); 1577 if (methodToCall == NULL) { 1578 ILOGV("+ unknown method\n"); 1579 GOTO_exceptionThrown(); 1580 } 1581 } 1582 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst); 1583GOTO_TARGET_END 1584 1585GOTO_TARGET(invokeVirtualQuick, bool methodCallRange) 1586 { 1587 Object* thisPtr; 1588 1589 EXPORT_PC(); 1590 1591 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */ 1592 ref = FETCH(1); /* vtable index */ 1593 vdst = FETCH(2); /* 4 regs -or- first reg */ 1594 1595 /* 1596 * The object against which we are executing a method is always 1597 * in the first argument. 1598 */ 1599 if (methodCallRange) { 1600 assert(vsrc1 > 0); 1601 ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}", 1602 vsrc1, ref, vdst, vdst+vsrc1-1); 1603 thisPtr = (Object*) GET_REGISTER(vdst); 1604 } else { 1605 assert((vsrc1>>4) > 0); 1606 ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}", 1607 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f); 1608 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f); 1609 } 1610 1611 if (!checkForNull(thisPtr)) 1612 GOTO_exceptionThrown(); 1613 1614 /* 1615 * Combine the object we found with the vtable offset in the 1616 * method. 
1617 */ 1618 assert(ref < thisPtr->clazz->vtableCount); 1619 methodToCall = thisPtr->clazz->vtable[ref]; 1620 1621#if 0 1622 if (dvmIsAbstractMethod(methodToCall)) { 1623 dvmThrowException("Ljava/lang/AbstractMethodError;", 1624 "abstract method not implemented"); 1625 GOTO_exceptionThrown(); 1626 } 1627#else 1628 assert(!dvmIsAbstractMethod(methodToCall) || 1629 methodToCall->nativeFunc != NULL); 1630#endif 1631 1632 LOGVV("+++ virtual[%d]=%s.%s\n", 1633 ref, methodToCall->clazz->descriptor, methodToCall->name); 1634 assert(methodToCall != NULL); 1635 1636 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst); 1637 } 1638GOTO_TARGET_END 1639 1640GOTO_TARGET(invokeSuperQuick, bool methodCallRange) 1641 { 1642 u2 thisReg; 1643 1644 EXPORT_PC(); 1645 1646 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */ 1647 ref = FETCH(1); /* vtable index */ 1648 vdst = FETCH(2); /* 4 regs -or- first reg */ 1649 1650 if (methodCallRange) { 1651 ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}", 1652 vsrc1, ref, vdst, vdst+vsrc1-1); 1653 thisReg = vdst; 1654 } else { 1655 ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}", 1656 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f); 1657 thisReg = vdst & 0x0f; 1658 } 1659 /* impossible in well-formed code, but we must check nevertheless */ 1660 if (!checkForNull((Object*) GET_REGISTER(thisReg))) 1661 GOTO_exceptionThrown(); 1662 1663#if 0 /* impossible in optimized + verified code */ 1664 if (ref >= curMethod->clazz->super->vtableCount) { 1665 dvmThrowException("Ljava/lang/NoSuchMethodError;", NULL); 1666 GOTO_exceptionThrown(); 1667 } 1668#else 1669 assert(ref < curMethod->clazz->super->vtableCount); 1670#endif 1671 1672 /* 1673 * Combine the object we found with the vtable offset in the 1674 * method's class. 1675 * 1676 * We're using the current method's class' superclass, not the 1677 * superclass of "this". This is because we might be executing 1678 * in a method inherited from a superclass, and we want to run 1679 * in the method's class' superclass. 1680 */ 1681 methodToCall = curMethod->clazz->super->vtable[ref]; 1682 1683#if 0 1684 if (dvmIsAbstractMethod(methodToCall)) { 1685 dvmThrowException("Ljava/lang/AbstractMethodError;", 1686 "abstract method not implemented"); 1687 GOTO_exceptionThrown(); 1688 } 1689#else 1690 assert(!dvmIsAbstractMethod(methodToCall) || 1691 methodToCall->nativeFunc != NULL); 1692#endif 1693 LOGVV("+++ super-virtual[%d]=%s.%s\n", 1694 ref, methodToCall->clazz->descriptor, methodToCall->name); 1695 assert(methodToCall != NULL); 1696 1697 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst); 1698 } 1699GOTO_TARGET_END 1700 1701 1702 1703 /* 1704 * General handling for return-void, return, and return-wide. Put the 1705 * return value in "retval" before jumping here. 1706 */ 1707GOTO_TARGET(returnFromMethod) 1708 { 1709 StackSaveArea* saveArea; 1710 1711 /* 1712 * We must do this BEFORE we pop the previous stack frame off, so 1713 * that the GC can see the return value (if any) in the local vars. 1714 * 1715 * Since this is now an interpreter switch point, we must do it before 1716 * we do anything at all. 
         */
        PERIODIC_CHECKS(kInterpEntryReturn, 0);

        ILOGV("> retval=0x%llx (leaving %s.%s %s)",
            retval.j, curMethod->clazz->descriptor, curMethod->name,
            curMethod->signature);
        //DUMP_REGS(curMethod, fp);

        saveArea = SAVEAREA_FROM_FP(fp);

#ifdef EASY_GDB
        debugSaveArea = saveArea;
#endif
#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
        TRACE_METHOD_EXIT(self, curMethod);
#endif

        /* back up to previous frame and see if we hit a break */
        fp = saveArea->prevFrame;
        assert(fp != NULL);
        if (dvmIsBreakFrame(fp)) {
            /* bail without popping the method frame from stack */
            LOGVV("+++ returned into break frame\n");
            GOTO_bail();
        }

        /* update thread FP, and reset local variables */
        self->curFrame = fp;
        curMethod = SAVEAREA_FROM_FP(fp)->method;
        //methodClass = curMethod->clazz;
        methodClassDex = curMethod->clazz->pDvmDex;
        pc = saveArea->savedPc;
        ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
            curMethod->name, curMethod->signature);

        /* use FINISH on the caller's invoke instruction */
        //u2 invokeInstr = INST_INST(FETCH(0));
        if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
            invokeInstr <= OP_INVOKE_INTERFACE*/)
        {
            FINISH(3);
        } else {
            //LOGE("Unknown invoke instr %02x at %d\n",
            //    invokeInstr, (int) (pc - curMethod->insns));
            assert(false);
        }
    }
GOTO_TARGET_END


    /*
     * Jump here when the code throws an exception.
     *
     * By the time we get here, the Throwable has been created and the stack
     * trace has been saved off.
     */
GOTO_TARGET(exceptionThrown)
    {
        Object* exception;
        int catchRelPc;

        /*
         * Since this is now an interpreter switch point, we must do it before
         * we do anything at all.
         */
        PERIODIC_CHECKS(kInterpEntryThrow, 0);

        /*
         * We save off the exception and clear the exception status.  While
         * processing the exception we might need to load some Throwable
         * classes, and we don't want class loader exceptions to get
         * confused with this one.
         */
        assert(dvmCheckException(self));
        exception = dvmGetException(self);
        dvmAddTrackedAlloc(exception, self);
        dvmClearException(self);

        LOGV("Handling exception %s at %s:%d\n",
            exception->clazz->descriptor, curMethod->name,
            dvmLineNumFromPC(curMethod, pc - curMethod->insns));

#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
        /*
         * Tell the debugger about it.
         *
         * TODO: if the exception was thrown by interpreted code, control
         * fell through native, and then back to us, we will report the
         * exception at the point of the throw and again here.  We can avoid
         * this by not reporting exceptions when we jump here directly from
         * the native call code above, but then we won't report exceptions
         * that were thrown *from* the JNI code (as opposed to *through* it).
         *
         * The correct solution is probably to ignore from-native exceptions
         * here, and have the JNI exception code do the reporting to the
         * debugger.
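         *
         * Note that the dvmFindCatchBlock() call here runs in scan-only
         * mode (fourth argument "true") purely so the handler location
         * can be reported; the call with "false" further down is the one
         * that actually unwinds the frame pointer.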
         */
        if (gDvm.debuggerActive) {
            void* catchFrame;
            catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                        exception, true, &catchFrame);
            dvmDbgPostException(fp, pc - curMethod->insns, catchFrame,
                catchRelPc, exception);
        }
#endif

        /*
         * We need to unroll to the catch block or the nearest "break"
         * frame.
         *
         * A break frame could indicate that we have reached an intermediate
         * native call, or have gone off the top of the stack and the thread
         * needs to exit.  Either way, we return from here, leaving the
         * exception raised.
         *
         * If we do find a catch block, we want to transfer execution to
         * that point.
         */
        catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                    exception, false, (void*)&fp);

        /*
         * Restore the stack bounds after an overflow.  This isn't going to
         * be correct in all circumstances, e.g. if JNI code devours the
         * exception this won't happen until some other exception gets
         * thrown.  If the code keeps pushing the stack bounds we'll end
         * up aborting the VM.
         *
         * Note we want to do this *after* the call to dvmFindCatchBlock,
         * because that may need extra stack space to resolve exception
         * classes (e.g. through a class loader).
         */
        if (self->stackOverflowed)
            dvmCleanupStackOverflow(self);

        if (catchRelPc < 0) {
            /* falling through to JNI code or off the bottom of the stack */
#if DVM_SHOW_EXCEPTION >= 2
            LOGD("Exception %s from %s:%d not caught locally\n",
                exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
                dvmLineNumFromPC(curMethod, pc - curMethod->insns));
#endif
            dvmSetException(self, exception);
            dvmReleaseTrackedAlloc(exception, self);
            GOTO_bail();
        }

#if DVM_SHOW_EXCEPTION >= 3
        {
            const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
            LOGD("Exception %s thrown from %s:%d to %s:%d\n",
                exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
                dvmLineNumFromPC(curMethod, pc - curMethod->insns),
                dvmGetMethodSourceFile(catchMethod),
                dvmLineNumFromPC(catchMethod, catchRelPc));
        }
#endif

        /*
         * Adjust local variables to match self->curFrame and the
         * updated PC.
         */
        //fp = (u4*) self->curFrame;
        curMethod = SAVEAREA_FROM_FP(fp)->method;
        //methodClass = curMethod->clazz;
        methodClassDex = curMethod->clazz->pDvmDex;
        pc = curMethod->insns + catchRelPc;
        ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
            curMethod->name, curMethod->signature);
        DUMP_REGS(curMethod, fp, false);            // show all regs

        /*
         * Restore the exception if the handler wants it.
         *
         * The Dalvik spec mandates that, if an exception handler wants to
         * do something with the exception, the first instruction executed
         * must be "move-exception".  We can pass the exception along
         * through the thread struct, and let the move-exception instruction
         * clear it for us.
         *
         * If the handler doesn't call move-exception, we don't want to
         * finish here with an exception still pending.
         */
        if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
            dvmSetException(self, exception);

        dvmReleaseTrackedAlloc(exception, self);
        FINISH(0);
    }
GOTO_TARGET_END


    /*
     * General handling for invoke-{virtual,super,direct,static,interface},
     * including "quick" variants.
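     *
     * (Every invoke handler above funnels into this target via
     * GOTO_invokeMethod() once it has resolved the callee.)
     *
     * Worked example of the non-range encoding described below, assuming
     * the standard Dalvik 35c layout: "invoke-virtual {v2, v5, v7}" gets
     * here with vsrc1 == 0x30 (count 3 in the high nibble) and
     * vdst == 0x0752; the argument-copy switch peels off one nibble per
     * argument and stores outs[0]=v2, outs[1]=v5, outs[2]=v7.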
     *
     * Set "methodToCall" to the Method we're calling, and set
     * "methodCallRange" according to whether this is a "/range" instruction.
     *
     * For a range call:
     *  "vsrc1" holds the argument count (8 bits)
     *  "vdst" holds the first argument in the range
     * For a non-range call:
     *  "vsrc1" holds the argument count (4 bits) and the 5th argument index
     *  "vdst" holds four 4-bit register indices
     *
     * The caller must EXPORT_PC before jumping here, because any method
     * call can throw a stack overflow exception.
     */
GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
    u2 count, u2 regs)
    {
        STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);

        //printf("range=%d call=%p count=%d regs=0x%04x\n",
        //    methodCallRange, methodToCall, count, regs);
        //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
        //    methodToCall->name, methodToCall->signature);

        u4* outs;
        int i;

        /*
         * Copy args.  This may corrupt vsrc1/vdst.
         */
        if (methodCallRange) {
            // could use memcpy or a "Duff's device"; most functions have
            // so few args it won't matter much
            assert(vsrc1 <= curMethod->outsSize);
            assert(vsrc1 == methodToCall->insSize);
            outs = OUTS_FROM_FP(fp, vsrc1);
            for (i = 0; i < vsrc1; i++)
                outs[i] = GET_REGISTER(vdst+i);
        } else {
            u4 count = vsrc1 >> 4;

            assert(count <= curMethod->outsSize);
            assert(count == methodToCall->insSize);
            assert(count <= 5);

            outs = OUTS_FROM_FP(fp, count);
#if 0
            if (count == 5) {
                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
                count--;
            }
            for (i = 0; i < (int) count; i++) {
                outs[i] = GET_REGISTER(vdst & 0x0f);
                vdst >>= 4;
            }
#else
            // This version executes fewer instructions but is larger
            // overall.  Seems to be a teensy bit faster.
            assert((vdst >> 16) == 0);  // 16 bits -or- high 16 bits clear
            switch (count) {
            case 5:
                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
            case 4:
                outs[3] = GET_REGISTER(vdst >> 12);
            case 3:
                outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
            case 2:
                outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
            case 1:
                outs[0] = GET_REGISTER(vdst & 0x0f);
            default:
                ;
            }
#endif
        }
    }

    /*
     * (This was originally a "goto" target; I've kept it separate from the
     * stuff above in case we want to refactor things again.)
     *
     * At this point, we have the arguments stored in the "outs" area of
     * the current method's stack frame, and the method to call in
     * "methodToCall".  Push a new stack frame.
     */
    {
        StackSaveArea* newSaveArea;
        u4* newFp;

        ILOGV("> %s%s.%s %s",
            dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
            methodToCall->clazz->descriptor, methodToCall->name,
            methodToCall->signature);

        newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
        newSaveArea = SAVEAREA_FROM_FP(newFp);

        /* verify that we have enough space */
        if (true) {
            u1* bottom;
            bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
            if (bottom < self->interpStackEnd) {
                /* stack overflow */
                LOGV("Stack overflow on method call (start=%p end=%p newBot=%p size=%d '%s')\n",
                    self->interpStackStart, self->interpStackEnd, bottom,
                    self->interpStackSize, methodToCall->name);
                dvmHandleStackOverflow(self);
                assert(dvmCheckException(self));
                GOTO_exceptionThrown();
            }
            //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p\n",
            //    fp, newFp, newSaveArea, bottom);
        }

#ifdef LOG_INSTR
        if (methodToCall->registersSize > methodToCall->insSize) {
            /*
             * This makes valgrind quiet when we print registers that
             * haven't been initialized.  Turn it off when the debug
             * messages are disabled -- we want valgrind to report any
             * used-before-initialized issues.
             */
            memset(newFp, 0xcc,
                (methodToCall->registersSize - methodToCall->insSize) * 4);
        }
#endif

#ifdef EASY_GDB
        newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
#endif
        newSaveArea->prevFrame = fp;
        newSaveArea->savedPc = pc;
        newSaveArea->method = methodToCall;

        if (!dvmIsNativeMethod(methodToCall)) {
            /*
             * "Call" interpreted code.  Reposition the PC, update the
             * frame pointer and other local state, and continue.
             */
            curMethod = methodToCall;
            methodClassDex = curMethod->clazz->pDvmDex;
            pc = methodToCall->insns;
            fp = self->curFrame = newFp;
#ifdef EASY_GDB
            debugSaveArea = SAVEAREA_FROM_FP(newFp);
#endif
#if INTERP_TYPE == INTERP_DBG
            debugIsMethodEntry = true;              // profiling, debugging
#endif
            ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
                curMethod->name, curMethod->signature);
            DUMP_REGS(curMethod, fp, true);         // show input args
            FINISH(0);                              // jump to method start
        } else {
            /* set this up for JNI locals, even if not a JNI native */
            newSaveArea->xtra.localRefTop = self->jniLocalRefTable.nextEntry;

            self->curFrame = newFp;

            DUMP_REGS(methodToCall, newFp, true);   // show input args

#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
            if (gDvm.debuggerActive) {
                dvmDbgPostLocationEvent(methodToCall, -1,
                    dvmGetThisPtr(curMethod, fp), DBG_METHOD_ENTRY);
            }
#endif
#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
            TRACE_METHOD_ENTER(self, methodToCall);
#endif

            ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
                methodToCall->name, methodToCall->signature);

            /*
             * Jump through native call bridge.  Because we leave no
             * space for locals on native calls, "newFp" points directly
             * to the method arguments.
             */
            (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);

#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_DEBUGGER)
            if (gDvm.debuggerActive) {
                dvmDbgPostLocationEvent(methodToCall, -1,
                    dvmGetThisPtr(curMethod, fp), DBG_METHOD_EXIT);
            }
#endif
#if (INTERP_TYPE == INTERP_DBG) && defined(WITH_PROFILER)
            TRACE_METHOD_EXIT(self, methodToCall);
#endif

            /* pop frame off */
            dvmPopJniLocals(self, newSaveArea);
            self->curFrame = fp;

            /*
             * If the native code threw an exception, or interpreted code
             * invoked by the native call threw one and nobody has cleared
             * it, jump to our local exception handling.
             */
            if (dvmCheckException(self)) {
                LOGV("Exception thrown by/below native code\n");
                GOTO_exceptionThrown();
            }

            ILOGD("> retval=0x%llx (leaving native)", retval.j);
            ILOGD("> (return from native %s.%s to %s.%s %s)",
                methodToCall->clazz->descriptor, methodToCall->name,
                curMethod->clazz->descriptor, curMethod->name,
                curMethod->signature);

            //u2 invokeInstr = INST_INST(FETCH(0));
            if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
                invokeInstr <= OP_INVOKE_INTERFACE*/)
            {
                FINISH(3);
            } else {
                //LOGE("Unknown invoke instr %02x at %d\n",
                //    invokeInstr, (int) (pc - curMethod->insns));
                assert(false);
            }
        }
    }
    assert(false);      // should not get here
GOTO_TARGET_END


/* File: cstubs/enddefs.c */

/* undefine "magic" name remapping */
#undef retval
#undef pc
#undef fp
#undef curMethod
#undef methodClassDex
#undef self
#undef debugTrackedRefStart
