InterpAsm-armv7-a.S revision 63644657f74e0a5d05f2c5fb56a18872e7ac7427
1/* 2 * This file was generated automatically by gen-mterp.py for 'armv7-a'. 3 * 4 * --> DO NOT EDIT <-- 5 */ 6 7/* File: armv5te/header.S */ 8/* 9 * Copyright (C) 2008 The Android Open Source Project 10 * 11 * Licensed under the Apache License, Version 2.0 (the "License"); 12 * you may not use this file except in compliance with the License. 13 * You may obtain a copy of the License at 14 * 15 * http://www.apache.org/licenses/LICENSE-2.0 16 * 17 * Unless required by applicable law or agreed to in writing, software 18 * distributed under the License is distributed on an "AS IS" BASIS, 19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 20 * See the License for the specific language governing permissions and 21 * limitations under the License. 22 */ 23 24/* 25 * ARMv5 definitions and declarations. 26 */ 27 28/* 29ARM EABI general notes: 30 31r0-r3 hold first 4 args to a method; they are not preserved across method calls 32r4-r8 are available for general use 33r9 is given special treatment in some situations, but not for us 34r10 (sl) seems to be generally available 35r11 (fp) is used by gcc (unless -fomit-frame-pointer is set) 36r12 (ip) is scratch -- not preserved across method calls 37r13 (sp) should be managed carefully in case a signal arrives 38r14 (lr) must be preserved 39r15 (pc) can be tinkered with directly 40 41r0 holds returns of <= 4 bytes 42r0-r1 hold returns of 8 bytes, low word in r0 43 44Callee must save/restore r4+ (except r12) if it modifies them. If VFP 45is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved, 46s0-s15 (d0-d7, q0-a3) do not need to be. 47 48Stack is "full descending". Only the arguments that don't fit in the first 4 49registers are placed on the stack. "sp" points at the first stacked argument 50(i.e. the 5th arg). 51 52VFP: single-precision results in s0, double-precision results in d0. 53 54In the EABI, "sp" must be 64-bit aligned on entry to a function, and any 5564-bit quantities (long long, double) must be 64-bit aligned. 56*/ 57 58/* 59Mterp and ARM notes: 60 61The following registers have fixed assignments: 62 63 reg nick purpose 64 r4 rPC interpreted program counter, used for fetching instructions 65 r5 rFP interpreted frame pointer, used for accessing locals and args 66 r6 rGLUE MterpGlue pointer 67 r7 rINST first 16-bit code unit of current instruction 68 r8 rIBASE interpreted instruction base pointer, used for computed goto 69 70Macros are provided for common operations. Each macro MUST emit only 71one instruction to make instruction-counting easier. They MUST NOT alter 72unspecified registers or condition codes. 73*/ 74 75/* single-purpose registers, given names for clarity */ 76#define rPC r4 77#define rFP r5 78#define rGLUE r6 79#define rINST r7 80#define rIBASE r8 81 82/* save/restore the PC and/or FP from the glue struct */ 83#define LOAD_PC_FROM_GLUE() ldr rPC, [rGLUE, #offGlue_pc] 84#define SAVE_PC_TO_GLUE() str rPC, [rGLUE, #offGlue_pc] 85#define LOAD_FP_FROM_GLUE() ldr rFP, [rGLUE, #offGlue_fp] 86#define SAVE_FP_TO_GLUE() str rFP, [rGLUE, #offGlue_fp] 87#define LOAD_PC_FP_FROM_GLUE() ldmia rGLUE, {rPC, rFP} 88#define SAVE_PC_FP_TO_GLUE() stmia rGLUE, {rPC, rFP} 89 90/* 91 * "export" the PC to the stack frame, f/b/o future exception objects. Must 92 * be done *before* something calls dvmThrowException. 93 * 94 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e. 95 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc) 96 * 97 * It's okay to do this more than once. 98 */ 99#define EXPORT_PC() \ 100 str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)] 101 102/* 103 * Given a frame pointer, find the stack save area. 104 * 105 * In C this is "((StackSaveArea*)(_fp) -1)". 106 */ 107#define SAVEAREA_FROM_FP(_reg, _fpreg) \ 108 sub _reg, _fpreg, #sizeofStackSaveArea 109 110/* 111 * Fetch the next instruction from rPC into rINST. Does not advance rPC. 112 */ 113#define FETCH_INST() ldrh rINST, [rPC] 114 115/* 116 * Fetch the next instruction from the specified offset. Advances rPC 117 * to point to the next instruction. "_count" is in 16-bit code units. 118 * 119 * Because of the limited size of immediate constants on ARM, this is only 120 * suitable for small forward movements (i.e. don't try to implement "goto" 121 * with this). 122 * 123 * This must come AFTER anything that can throw an exception, or the 124 * exception catch may miss. (This also implies that it must come after 125 * EXPORT_PC().) 126 */ 127#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]! 128 129/* 130 * The operation performed here is similar to FETCH_ADVANCE_INST, except the 131 * src and dest registers are parameterized (not hard-wired to rPC and rINST). 132 */ 133#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \ 134 ldrh _dreg, [_sreg, #(_count*2)]! 135 136/* 137 * Fetch the next instruction from an offset specified by _reg. Updates 138 * rPC to point to the next instruction. "_reg" must specify the distance 139 * in bytes, *not* 16-bit code units, and may be a signed value. 140 * 141 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the 142 * bits that hold the shift distance are used for the half/byte/sign flags. 143 * In some cases we can pre-double _reg for free, so we require a byte offset 144 * here. 145 */ 146#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]! 147 148/* 149 * Fetch a half-word code unit from an offset past the current PC. The 150 * "_count" value is in 16-bit code units. Does not advance rPC. 151 * 152 * The "_S" variant works the same but treats the value as signed. 153 */ 154#define FETCH(_reg, _count) ldrh _reg, [rPC, #(_count*2)] 155#define FETCH_S(_reg, _count) ldrsh _reg, [rPC, #(_count*2)] 156 157/* 158 * Fetch one byte from an offset past the current PC. Pass in the same 159 * "_count" as you would for FETCH, and an additional 0/1 indicating which 160 * byte of the halfword you want (lo/hi). 161 */ 162#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #(_count*2+_byte)] 163 164/* 165 * Put the instruction's opcode field into the specified register. 166 */ 167#define GET_INST_OPCODE(_reg) and _reg, rINST, #255 168 169/* 170 * Put the prefetched instruction's opcode field into the specified register. 171 */ 172#define GET_PREFETCHED_OPCODE(_oreg, _ireg) and _oreg, _ireg, #255 173 174/* 175 * Begin executing the opcode in _reg. Because this only jumps within the 176 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork. 177 */ 178#define GOTO_OPCODE(_reg) add pc, rIBASE, _reg, lsl #6 179#define GOTO_OPCODE_IFEQ(_reg) addeq pc, rIBASE, _reg, lsl #6 180#define GOTO_OPCODE_IFNE(_reg) addne pc, rIBASE, _reg, lsl #6 181 182/* 183 * Get/set the 32-bit value from a Dalvik register. 184 */ 185#define GET_VREG(_reg, _vreg) ldr _reg, [rFP, _vreg, lsl #2] 186#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2] 187 188#if defined(WITH_JIT) 189#define GET_JIT_PROF_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitProfTable] 190#define GET_JIT_THRESHOLD(_reg) ldr _reg,[rGLUE,#offGlue_jitThreshold] 191#endif 192 193/* 194 * Convert a virtual register index into an address. 195 */ 196#define VREG_INDEX_TO_ADDR(_reg, _vreg) \ 197 add _reg, rFP, _vreg, lsl #2 198 199/* 200 * This is a #include, not a %include, because we want the C pre-processor 201 * to expand the macros into assembler assignment statements. 202 */ 203#include "../common/asm-constants.h" 204 205#if defined(WITH_JIT) 206#include "../common/jit-config.h" 207#endif 208 209/* File: armv7-a/platform.S */ 210/* 211 * =========================================================================== 212 * CPU-version-specific defines 213 * =========================================================================== 214 */ 215 216#if !defined(ANDROID_SMP) 217# error "Must define ANDROID_SMP" 218#endif 219 220/* 221 * Macro for data memory barrier. 222 */ 223.macro SMP_DMB 224#if ANDROID_SMP != 0 225 dmb 226#else 227 /* not SMP */ 228#endif 229.endm 230 231/* 232 * Macro for data memory barrier (store/store variant). 233 */ 234.macro SMP_DMB_ST 235#if ANDROID_SMP != 0 236 dmb st 237#else 238 /* not SMP */ 239#endif 240.endm 241 242/* File: armv5te/entry.S */ 243/* 244 * Copyright (C) 2008 The Android Open Source Project 245 * 246 * Licensed under the Apache License, Version 2.0 (the "License"); 247 * you may not use this file except in compliance with the License. 248 * You may obtain a copy of the License at 249 * 250 * http://www.apache.org/licenses/LICENSE-2.0 251 * 252 * Unless required by applicable law or agreed to in writing, software 253 * distributed under the License is distributed on an "AS IS" BASIS, 254 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 255 * See the License for the specific language governing permissions and 256 * limitations under the License. 257 */ 258/* 259 * Interpreter entry point. 260 */ 261 262/* 263 * We don't have formal stack frames, so gdb scans upward in the code 264 * to find the start of the function (a label with the %function type), 265 * and then looks at the next few instructions to figure out what 266 * got pushed onto the stack. From this it figures out how to restore 267 * the registers, including PC, for the previous stack frame. If gdb 268 * sees a non-function label, it stops scanning, so either we need to 269 * have nothing but assembler-local labels between the entry point and 270 * the break, or we need to fake it out. 271 * 272 * When this is defined, we add some stuff to make gdb less confused. 273 */ 274#define ASSIST_DEBUGGER 1 275 276 .text 277 .align 2 278 .global dvmMterpStdRun 279 .type dvmMterpStdRun, %function 280 281/* 282 * On entry: 283 * r0 MterpGlue* glue 284 * 285 * This function returns a boolean "changeInterp" value. The return comes 286 * via a call to dvmMterpStdBail(). 287 */ 288dvmMterpStdRun: 289#define MTERP_ENTRY1 \ 290 .save {r4-r10,fp,lr}; \ 291 stmfd sp!, {r4-r10,fp,lr} @ save 9 regs 292#define MTERP_ENTRY2 \ 293 .pad #4; \ 294 sub sp, sp, #4 @ align 64 295 296 .fnstart 297 MTERP_ENTRY1 298 MTERP_ENTRY2 299 300 /* save stack pointer, add magic word for debuggerd */ 301 str sp, [r0, #offGlue_bailPtr] @ save SP for eventual return 302 303 /* set up "named" registers, figure out entry point */ 304 mov rGLUE, r0 @ set rGLUE 305 ldr r1, [r0, #offGlue_entryPoint] @ enum is 4 bytes in aapcs-EABI 306 LOAD_PC_FP_FROM_GLUE() @ load rPC and rFP from "glue" 307 adr rIBASE, dvmAsmInstructionStart @ set rIBASE 308 cmp r1, #kInterpEntryInstr @ usual case? 309 bne .Lnot_instr @ no, handle it 310 311#if defined(WITH_JIT) 312.LentryInstr: 313 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 314 /* Entry is always a possible trace start */ 315 GET_JIT_PROF_TABLE(r0) 316 FETCH_INST() 317 mov r1, #0 @ prepare the value for the new state 318 str r1, [r10, #offThread_inJitCodeCache] @ back to the interp land 319 cmp r0,#0 @ is profiling disabled? 320#if !defined(WITH_SELF_VERIFICATION) 321 bne common_updateProfile @ profiling is enabled 322#else 323 ldr r2, [r10, #offThread_shadowSpace] @ to find out the jit exit state 324 beq 1f @ profiling is disabled 325 ldr r3, [r2, #offShadowSpace_jitExitState] @ jit exit state 326 cmp r3, #kSVSTraceSelect @ hot trace following? 327 moveq r2,#kJitTSelectRequestHot @ ask for trace selection 328 beq common_selectTrace @ go build the trace 329 cmp r3, #kSVSNoProfile @ don't profile the next instruction? 330 beq 1f @ intrepret the next instruction 331 b common_updateProfile @ collect profiles 332#endif 3331: 334 GET_INST_OPCODE(ip) 335 GOTO_OPCODE(ip) 336#else 337 /* start executing the instruction at rPC */ 338 FETCH_INST() @ load rINST from rPC 339 GET_INST_OPCODE(ip) @ extract opcode from rINST 340 GOTO_OPCODE(ip) @ jump to next instruction 341#endif 342 343.Lnot_instr: 344 cmp r1, #kInterpEntryReturn @ were we returning from a method? 345 beq common_returnFromMethod 346 347.Lnot_return: 348 cmp r1, #kInterpEntryThrow @ were we throwing an exception? 349 beq common_exceptionThrown 350 351#if defined(WITH_JIT) 352.Lnot_throw: 353 ldr r10,[rGLUE, #offGlue_jitResumeNPC] 354 ldr r2,[rGLUE, #offGlue_jitResumeDPC] 355 cmp r1, #kInterpEntryResume @ resuming after Jit single-step? 356 bne .Lbad_arg 357 cmp rPC,r2 358 bne .LentryInstr @ must have branched, don't resume 359#if defined(WITH_SELF_VERIFICATION) 360 @ glue->entryPoint will be set in dvmSelfVerificationSaveState 361 b jitSVShadowRunStart @ re-enter the translation after the 362 @ single-stepped instruction 363 @noreturn 364#endif 365 mov r1, #kInterpEntryInstr 366 str r1, [rGLUE, #offGlue_entryPoint] 367 bx r10 @ re-enter the translation 368#endif 369 370.Lbad_arg: 371 ldr r0, strBadEntryPoint 372 @ r1 holds value of entryPoint 373 bl printf 374 bl dvmAbort 375 .fnend 376 377 378 .global dvmMterpStdBail 379 .type dvmMterpStdBail, %function 380 381/* 382 * Restore the stack pointer and PC from the save point established on entry. 383 * This is essentially the same as a longjmp, but should be cheaper. The 384 * last instruction causes us to return to whoever called dvmMterpStdRun. 385 * 386 * We pushed some registers on the stack in dvmMterpStdRun, then saved 387 * SP and LR. Here we restore SP, restore the registers, and then restore 388 * LR to PC. 389 * 390 * On entry: 391 * r0 MterpGlue* glue 392 * r1 bool changeInterp 393 */ 394dvmMterpStdBail: 395 ldr sp, [r0, #offGlue_bailPtr] @ sp<- saved SP 396 mov r0, r1 @ return the changeInterp value 397 add sp, sp, #4 @ un-align 64 398 ldmfd sp!, {r4-r10,fp,pc} @ restore 9 regs and return 399 400 401/* 402 * String references. 403 */ 404strBadEntryPoint: 405 .word .LstrBadEntryPoint 406 407 408 .global dvmAsmInstructionStart 409 .type dvmAsmInstructionStart, %function 410dvmAsmInstructionStart = .L_OP_NOP 411 .text 412 413/* ------------------------------ */ 414 .balign 64 415.L_OP_NOP: /* 0x00 */ 416/* File: armv5te/OP_NOP.S */ 417 FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST 418 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 419 GOTO_OPCODE(ip) @ execute it 420 421#ifdef ASSIST_DEBUGGER 422 /* insert fake function header to help gdb find the stack frame */ 423 .type dalvik_inst, %function 424dalvik_inst: 425 .fnstart 426 MTERP_ENTRY1 427 MTERP_ENTRY2 428 .fnend 429#endif 430 431/* ------------------------------ */ 432 .balign 64 433.L_OP_MOVE: /* 0x01 */ 434/* File: armv6t2/OP_MOVE.S */ 435 /* for move, move-object, long-to-int */ 436 /* op vA, vB */ 437 mov r1, rINST, lsr #12 @ r1<- B from 15:12 438 ubfx r0, rINST, #8, #4 @ r0<- A from 11:8 439 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 440 GET_VREG(r2, r1) @ r2<- fp[B] 441 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 442 SET_VREG(r2, r0) @ fp[A]<- r2 443 GOTO_OPCODE(ip) @ execute next instruction 444 445/* ------------------------------ */ 446 .balign 64 447.L_OP_MOVE_FROM16: /* 0x02 */ 448/* File: armv5te/OP_MOVE_FROM16.S */ 449 /* for: move/from16, move-object/from16 */ 450 /* op vAA, vBBBB */ 451 FETCH(r1, 1) @ r1<- BBBB 452 mov r0, rINST, lsr #8 @ r0<- AA 453 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 454 GET_VREG(r2, r1) @ r2<- fp[BBBB] 455 GET_INST_OPCODE(ip) @ extract opcode from rINST 456 SET_VREG(r2, r0) @ fp[AA]<- r2 457 GOTO_OPCODE(ip) @ jump to next instruction 458 459/* ------------------------------ */ 460 .balign 64 461.L_OP_MOVE_16: /* 0x03 */ 462/* File: armv5te/OP_MOVE_16.S */ 463 /* for: move/16, move-object/16 */ 464 /* op vAAAA, vBBBB */ 465 FETCH(r1, 2) @ r1<- BBBB 466 FETCH(r0, 1) @ r0<- AAAA 467 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 468 GET_VREG(r2, r1) @ r2<- fp[BBBB] 469 GET_INST_OPCODE(ip) @ extract opcode from rINST 470 SET_VREG(r2, r0) @ fp[AAAA]<- r2 471 GOTO_OPCODE(ip) @ jump to next instruction 472 473/* ------------------------------ */ 474 .balign 64 475.L_OP_MOVE_WIDE: /* 0x04 */ 476/* File: armv6t2/OP_MOVE_WIDE.S */ 477 /* move-wide vA, vB */ 478 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 479 mov r3, rINST, lsr #12 @ r3<- B 480 ubfx r2, rINST, #8, #4 @ r2<- A 481 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 482 add r2, rFP, r2, lsl #2 @ r2<- &fp[A] 483 ldmia r3, {r0-r1} @ r0/r1<- fp[B] 484 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 485 GET_INST_OPCODE(ip) @ extract opcode from rINST 486 stmia r2, {r0-r1} @ fp[A]<- r0/r1 487 GOTO_OPCODE(ip) @ jump to next instruction 488 489/* ------------------------------ */ 490 .balign 64 491.L_OP_MOVE_WIDE_FROM16: /* 0x05 */ 492/* File: armv5te/OP_MOVE_WIDE_FROM16.S */ 493 /* move-wide/from16 vAA, vBBBB */ 494 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 495 FETCH(r3, 1) @ r3<- BBBB 496 mov r2, rINST, lsr #8 @ r2<- AA 497 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 498 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 499 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 500 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 501 GET_INST_OPCODE(ip) @ extract opcode from rINST 502 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 503 GOTO_OPCODE(ip) @ jump to next instruction 504 505/* ------------------------------ */ 506 .balign 64 507.L_OP_MOVE_WIDE_16: /* 0x06 */ 508/* File: armv5te/OP_MOVE_WIDE_16.S */ 509 /* move-wide/16 vAAAA, vBBBB */ 510 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 511 FETCH(r3, 2) @ r3<- BBBB 512 FETCH(r2, 1) @ r2<- AAAA 513 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 514 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA] 515 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 516 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 517 GET_INST_OPCODE(ip) @ extract opcode from rINST 518 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1 519 GOTO_OPCODE(ip) @ jump to next instruction 520 521/* ------------------------------ */ 522 .balign 64 523.L_OP_MOVE_OBJECT: /* 0x07 */ 524/* File: armv5te/OP_MOVE_OBJECT.S */ 525/* File: armv5te/OP_MOVE.S */ 526 /* for move, move-object, long-to-int */ 527 /* op vA, vB */ 528 mov r1, rINST, lsr #12 @ r1<- B from 15:12 529 mov r0, rINST, lsr #8 @ r0<- A from 11:8 530 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 531 GET_VREG(r2, r1) @ r2<- fp[B] 532 and r0, r0, #15 533 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 534 SET_VREG(r2, r0) @ fp[A]<- r2 535 GOTO_OPCODE(ip) @ execute next instruction 536 537 538/* ------------------------------ */ 539 .balign 64 540.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */ 541/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */ 542/* File: armv5te/OP_MOVE_FROM16.S */ 543 /* for: move/from16, move-object/from16 */ 544 /* op vAA, vBBBB */ 545 FETCH(r1, 1) @ r1<- BBBB 546 mov r0, rINST, lsr #8 @ r0<- AA 547 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 548 GET_VREG(r2, r1) @ r2<- fp[BBBB] 549 GET_INST_OPCODE(ip) @ extract opcode from rINST 550 SET_VREG(r2, r0) @ fp[AA]<- r2 551 GOTO_OPCODE(ip) @ jump to next instruction 552 553 554/* ------------------------------ */ 555 .balign 64 556.L_OP_MOVE_OBJECT_16: /* 0x09 */ 557/* File: armv5te/OP_MOVE_OBJECT_16.S */ 558/* File: armv5te/OP_MOVE_16.S */ 559 /* for: move/16, move-object/16 */ 560 /* op vAAAA, vBBBB */ 561 FETCH(r1, 2) @ r1<- BBBB 562 FETCH(r0, 1) @ r0<- AAAA 563 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 564 GET_VREG(r2, r1) @ r2<- fp[BBBB] 565 GET_INST_OPCODE(ip) @ extract opcode from rINST 566 SET_VREG(r2, r0) @ fp[AAAA]<- r2 567 GOTO_OPCODE(ip) @ jump to next instruction 568 569 570/* ------------------------------ */ 571 .balign 64 572.L_OP_MOVE_RESULT: /* 0x0a */ 573/* File: armv5te/OP_MOVE_RESULT.S */ 574 /* for: move-result, move-result-object */ 575 /* op vAA */ 576 mov r2, rINST, lsr #8 @ r2<- AA 577 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 578 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 579 GET_INST_OPCODE(ip) @ extract opcode from rINST 580 SET_VREG(r0, r2) @ fp[AA]<- r0 581 GOTO_OPCODE(ip) @ jump to next instruction 582 583/* ------------------------------ */ 584 .balign 64 585.L_OP_MOVE_RESULT_WIDE: /* 0x0b */ 586/* File: armv5te/OP_MOVE_RESULT_WIDE.S */ 587 /* move-result-wide vAA */ 588 mov r2, rINST, lsr #8 @ r2<- AA 589 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 590 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 591 ldmia r3, {r0-r1} @ r0/r1<- retval.j 592 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 593 GET_INST_OPCODE(ip) @ extract opcode from rINST 594 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 595 GOTO_OPCODE(ip) @ jump to next instruction 596 597/* ------------------------------ */ 598 .balign 64 599.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */ 600/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */ 601/* File: armv5te/OP_MOVE_RESULT.S */ 602 /* for: move-result, move-result-object */ 603 /* op vAA */ 604 mov r2, rINST, lsr #8 @ r2<- AA 605 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 606 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 607 GET_INST_OPCODE(ip) @ extract opcode from rINST 608 SET_VREG(r0, r2) @ fp[AA]<- r0 609 GOTO_OPCODE(ip) @ jump to next instruction 610 611 612/* ------------------------------ */ 613 .balign 64 614.L_OP_MOVE_EXCEPTION: /* 0x0d */ 615/* File: armv5te/OP_MOVE_EXCEPTION.S */ 616 /* move-exception vAA */ 617 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 618 mov r2, rINST, lsr #8 @ r2<- AA 619 ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass 620 mov r1, #0 @ r1<- 0 621 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 622 SET_VREG(r3, r2) @ fp[AA]<- exception obj 623 GET_INST_OPCODE(ip) @ extract opcode from rINST 624 str r1, [r0, #offThread_exception] @ dvmClearException bypass 625 GOTO_OPCODE(ip) @ jump to next instruction 626 627/* ------------------------------ */ 628 .balign 64 629.L_OP_RETURN_VOID: /* 0x0e */ 630/* File: armv5te/OP_RETURN_VOID.S */ 631 b common_returnFromMethod 632 633/* ------------------------------ */ 634 .balign 64 635.L_OP_RETURN: /* 0x0f */ 636/* File: armv5te/OP_RETURN.S */ 637 /* 638 * Return a 32-bit value. Copies the return value into the "glue" 639 * structure, then jumps to the return handler. 640 * 641 * for: return, return-object 642 */ 643 /* op vAA */ 644 mov r2, rINST, lsr #8 @ r2<- AA 645 GET_VREG(r0, r2) @ r0<- vAA 646 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 647 b common_returnFromMethod 648 649/* ------------------------------ */ 650 .balign 64 651.L_OP_RETURN_WIDE: /* 0x10 */ 652/* File: armv5te/OP_RETURN_WIDE.S */ 653 /* 654 * Return a 64-bit value. Copies the return value into the "glue" 655 * structure, then jumps to the return handler. 656 */ 657 /* return-wide vAA */ 658 mov r2, rINST, lsr #8 @ r2<- AA 659 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 660 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 661 ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1 662 stmia r3, {r0-r1} @ retval<- r0/r1 663 b common_returnFromMethod 664 665/* ------------------------------ */ 666 .balign 64 667.L_OP_RETURN_OBJECT: /* 0x11 */ 668/* File: armv5te/OP_RETURN_OBJECT.S */ 669/* File: armv5te/OP_RETURN.S */ 670 /* 671 * Return a 32-bit value. Copies the return value into the "glue" 672 * structure, then jumps to the return handler. 673 * 674 * for: return, return-object 675 */ 676 /* op vAA */ 677 mov r2, rINST, lsr #8 @ r2<- AA 678 GET_VREG(r0, r2) @ r0<- vAA 679 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 680 b common_returnFromMethod 681 682 683/* ------------------------------ */ 684 .balign 64 685.L_OP_CONST_4: /* 0x12 */ 686/* File: armv6t2/OP_CONST_4.S */ 687 /* const/4 vA, #+B */ 688 mov r1, rINST, lsl #16 @ r1<- Bxxx0000 689 ubfx r0, rINST, #8, #4 @ r0<- A 690 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 691 mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended) 692 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 693 SET_VREG(r1, r0) @ fp[A]<- r1 694 GOTO_OPCODE(ip) @ execute next instruction 695 696/* ------------------------------ */ 697 .balign 64 698.L_OP_CONST_16: /* 0x13 */ 699/* File: armv5te/OP_CONST_16.S */ 700 /* const/16 vAA, #+BBBB */ 701 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 702 mov r3, rINST, lsr #8 @ r3<- AA 703 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 704 SET_VREG(r0, r3) @ vAA<- r0 705 GET_INST_OPCODE(ip) @ extract opcode from rINST 706 GOTO_OPCODE(ip) @ jump to next instruction 707 708/* ------------------------------ */ 709 .balign 64 710.L_OP_CONST: /* 0x14 */ 711/* File: armv5te/OP_CONST.S */ 712 /* const vAA, #+BBBBbbbb */ 713 mov r3, rINST, lsr #8 @ r3<- AA 714 FETCH(r0, 1) @ r0<- bbbb (low) 715 FETCH(r1, 2) @ r1<- BBBB (high) 716 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 717 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 718 GET_INST_OPCODE(ip) @ extract opcode from rINST 719 SET_VREG(r0, r3) @ vAA<- r0 720 GOTO_OPCODE(ip) @ jump to next instruction 721 722/* ------------------------------ */ 723 .balign 64 724.L_OP_CONST_HIGH16: /* 0x15 */ 725/* File: armv5te/OP_CONST_HIGH16.S */ 726 /* const/high16 vAA, #+BBBB0000 */ 727 FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended) 728 mov r3, rINST, lsr #8 @ r3<- AA 729 mov r0, r0, lsl #16 @ r0<- BBBB0000 730 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 731 SET_VREG(r0, r3) @ vAA<- r0 732 GET_INST_OPCODE(ip) @ extract opcode from rINST 733 GOTO_OPCODE(ip) @ jump to next instruction 734 735/* ------------------------------ */ 736 .balign 64 737.L_OP_CONST_WIDE_16: /* 0x16 */ 738/* File: armv5te/OP_CONST_WIDE_16.S */ 739 /* const-wide/16 vAA, #+BBBB */ 740 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 741 mov r3, rINST, lsr #8 @ r3<- AA 742 mov r1, r0, asr #31 @ r1<- ssssssss 743 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 744 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 745 GET_INST_OPCODE(ip) @ extract opcode from rINST 746 stmia r3, {r0-r1} @ vAA<- r0/r1 747 GOTO_OPCODE(ip) @ jump to next instruction 748 749/* ------------------------------ */ 750 .balign 64 751.L_OP_CONST_WIDE_32: /* 0x17 */ 752/* File: armv5te/OP_CONST_WIDE_32.S */ 753 /* const-wide/32 vAA, #+BBBBbbbb */ 754 FETCH(r0, 1) @ r0<- 0000bbbb (low) 755 mov r3, rINST, lsr #8 @ r3<- AA 756 FETCH_S(r2, 2) @ r2<- ssssBBBB (high) 757 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 758 orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb 759 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 760 mov r1, r0, asr #31 @ r1<- ssssssss 761 GET_INST_OPCODE(ip) @ extract opcode from rINST 762 stmia r3, {r0-r1} @ vAA<- r0/r1 763 GOTO_OPCODE(ip) @ jump to next instruction 764 765/* ------------------------------ */ 766 .balign 64 767.L_OP_CONST_WIDE: /* 0x18 */ 768/* File: armv5te/OP_CONST_WIDE.S */ 769 /* const-wide vAA, #+HHHHhhhhBBBBbbbb */ 770 FETCH(r0, 1) @ r0<- bbbb (low) 771 FETCH(r1, 2) @ r1<- BBBB (low middle) 772 FETCH(r2, 3) @ r2<- hhhh (high middle) 773 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word) 774 FETCH(r3, 4) @ r3<- HHHH (high) 775 mov r9, rINST, lsr #8 @ r9<- AA 776 orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word) 777 FETCH_ADVANCE_INST(5) @ advance rPC, load rINST 778 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 779 GET_INST_OPCODE(ip) @ extract opcode from rINST 780 stmia r9, {r0-r1} @ vAA<- r0/r1 781 GOTO_OPCODE(ip) @ jump to next instruction 782 783/* ------------------------------ */ 784 .balign 64 785.L_OP_CONST_WIDE_HIGH16: /* 0x19 */ 786/* File: armv5te/OP_CONST_WIDE_HIGH16.S */ 787 /* const-wide/high16 vAA, #+BBBB000000000000 */ 788 FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended) 789 mov r3, rINST, lsr #8 @ r3<- AA 790 mov r0, #0 @ r0<- 00000000 791 mov r1, r1, lsl #16 @ r1<- BBBB0000 792 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 793 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 794 GET_INST_OPCODE(ip) @ extract opcode from rINST 795 stmia r3, {r0-r1} @ vAA<- r0/r1 796 GOTO_OPCODE(ip) @ jump to next instruction 797 798/* ------------------------------ */ 799 .balign 64 800.L_OP_CONST_STRING: /* 0x1a */ 801/* File: armv5te/OP_CONST_STRING.S */ 802 /* const/string vAA, String@BBBB */ 803 FETCH(r1, 1) @ r1<- BBBB 804 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 805 mov r9, rINST, lsr #8 @ r9<- AA 806 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 807 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 808 cmp r0, #0 @ not yet resolved? 809 beq .LOP_CONST_STRING_resolve 810 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 811 GET_INST_OPCODE(ip) @ extract opcode from rINST 812 SET_VREG(r0, r9) @ vAA<- r0 813 GOTO_OPCODE(ip) @ jump to next instruction 814 815/* ------------------------------ */ 816 .balign 64 817.L_OP_CONST_STRING_JUMBO: /* 0x1b */ 818/* File: armv5te/OP_CONST_STRING_JUMBO.S */ 819 /* const/string vAA, String@BBBBBBBB */ 820 FETCH(r0, 1) @ r0<- bbbb (low) 821 FETCH(r1, 2) @ r1<- BBBB (high) 822 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 823 mov r9, rINST, lsr #8 @ r9<- AA 824 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 825 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 826 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 827 cmp r0, #0 828 beq .LOP_CONST_STRING_JUMBO_resolve 829 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 830 GET_INST_OPCODE(ip) @ extract opcode from rINST 831 SET_VREG(r0, r9) @ vAA<- r0 832 GOTO_OPCODE(ip) @ jump to next instruction 833 834/* ------------------------------ */ 835 .balign 64 836.L_OP_CONST_CLASS: /* 0x1c */ 837/* File: armv5te/OP_CONST_CLASS.S */ 838 /* const/class vAA, Class@BBBB */ 839 FETCH(r1, 1) @ r1<- BBBB 840 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 841 mov r9, rINST, lsr #8 @ r9<- AA 842 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses 843 ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB] 844 cmp r0, #0 @ not yet resolved? 845 beq .LOP_CONST_CLASS_resolve 846 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 847 GET_INST_OPCODE(ip) @ extract opcode from rINST 848 SET_VREG(r0, r9) @ vAA<- r0 849 GOTO_OPCODE(ip) @ jump to next instruction 850 851/* ------------------------------ */ 852 .balign 64 853.L_OP_MONITOR_ENTER: /* 0x1d */ 854/* File: armv5te/OP_MONITOR_ENTER.S */ 855 /* 856 * Synchronize on an object. 857 */ 858 /* monitor-enter vAA */ 859 mov r2, rINST, lsr #8 @ r2<- AA 860 GET_VREG(r1, r2) @ r1<- vAA (object) 861 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 862 cmp r1, #0 @ null object? 863 EXPORT_PC() @ need for precise GC, MONITOR_TRACKING 864 beq common_errNullObject @ null object, throw an exception 865 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 866 bl dvmLockObject @ call(self, obj) 867#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */ 868 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 869 ldr r1, [r0, #offThread_exception] @ check for exception 870 cmp r1, #0 871 bne common_exceptionThrown @ exception raised, bail out 872#endif 873 GET_INST_OPCODE(ip) @ extract opcode from rINST 874 GOTO_OPCODE(ip) @ jump to next instruction 875 876/* ------------------------------ */ 877 .balign 64 878.L_OP_MONITOR_EXIT: /* 0x1e */ 879/* File: armv5te/OP_MONITOR_EXIT.S */ 880 /* 881 * Unlock an object. 882 * 883 * Exceptions that occur when unlocking a monitor need to appear as 884 * if they happened at the following instruction. See the Dalvik 885 * instruction spec. 886 */ 887 /* monitor-exit vAA */ 888 mov r2, rINST, lsr #8 @ r2<- AA 889 EXPORT_PC() @ before fetch: export the PC 890 GET_VREG(r1, r2) @ r1<- vAA (object) 891 cmp r1, #0 @ null object? 892 beq 1f @ yes 893 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 894 bl dvmUnlockObject @ r0<- success for unlock(self, obj) 895 cmp r0, #0 @ failed? 896 FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST 897 beq common_exceptionThrown @ yes, exception is pending 898 GET_INST_OPCODE(ip) @ extract opcode from rINST 899 GOTO_OPCODE(ip) @ jump to next instruction 9001: 901 FETCH_ADVANCE_INST(1) @ advance before throw 902 b common_errNullObject 903 904/* ------------------------------ */ 905 .balign 64 906.L_OP_CHECK_CAST: /* 0x1f */ 907/* File: armv5te/OP_CHECK_CAST.S */ 908 /* 909 * Check to see if a cast from one class to another is allowed. 910 */ 911 /* check-cast vAA, class@BBBB */ 912 mov r3, rINST, lsr #8 @ r3<- AA 913 FETCH(r2, 1) @ r2<- BBBB 914 GET_VREG(r9, r3) @ r9<- object 915 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex 916 cmp r9, #0 @ is object null? 917 ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses 918 beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds 919 ldr r1, [r0, r2, lsl #2] @ r1<- resolved class 920 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 921 cmp r1, #0 @ have we resolved this before? 922 beq .LOP_CHECK_CAST_resolve @ not resolved, do it now 923.LOP_CHECK_CAST_resolved: 924 cmp r0, r1 @ same class (trivial success)? 925 bne .LOP_CHECK_CAST_fullcheck @ no, do full check 926.LOP_CHECK_CAST_okay: 927 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 928 GET_INST_OPCODE(ip) @ extract opcode from rINST 929 GOTO_OPCODE(ip) @ jump to next instruction 930 931/* ------------------------------ */ 932 .balign 64 933.L_OP_INSTANCE_OF: /* 0x20 */ 934/* File: armv5te/OP_INSTANCE_OF.S */ 935 /* 936 * Check to see if an object reference is an instance of a class. 937 * 938 * Most common situation is a non-null object, being compared against 939 * an already-resolved class. 940 */ 941 /* instance-of vA, vB, class@CCCC */ 942 mov r3, rINST, lsr #12 @ r3<- B 943 mov r9, rINST, lsr #8 @ r9<- A+ 944 GET_VREG(r0, r3) @ r0<- vB (object) 945 and r9, r9, #15 @ r9<- A 946 cmp r0, #0 @ is object null? 947 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex 948 beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0 949 FETCH(r3, 1) @ r3<- CCCC 950 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses 951 ldr r1, [r2, r3, lsl #2] @ r1<- resolved class 952 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 953 cmp r1, #0 @ have we resolved this before? 954 beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now 955.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class 956 cmp r0, r1 @ same class (trivial success)? 957 beq .LOP_INSTANCE_OF_trivial @ yes, trivial finish 958 b .LOP_INSTANCE_OF_fullcheck @ no, do full check 959 960/* ------------------------------ */ 961 .balign 64 962.L_OP_ARRAY_LENGTH: /* 0x21 */ 963/* File: armv6t2/OP_ARRAY_LENGTH.S */ 964 /* 965 * Return the length of an array. 966 */ 967 mov r1, rINST, lsr #12 @ r1<- B 968 ubfx r2, rINST, #8, #4 @ r2<- A 969 GET_VREG(r0, r1) @ r0<- vB (object ref) 970 cmp r0, #0 @ is object null? 971 beq common_errNullObject @ yup, fail 972 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 973 ldr r3, [r0, #offArrayObject_length] @ r3<- array length 974 GET_INST_OPCODE(ip) @ extract opcode from rINST 975 SET_VREG(r3, r2) @ vB<- length 976 GOTO_OPCODE(ip) @ jump to next instruction 977 978/* ------------------------------ */ 979 .balign 64 980.L_OP_NEW_INSTANCE: /* 0x22 */ 981/* File: armv5te/OP_NEW_INSTANCE.S */ 982 /* 983 * Create a new instance of a class. 984 */ 985 /* new-instance vAA, class@BBBB */ 986 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 987 FETCH(r1, 1) @ r1<- BBBB 988 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 989 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 990 EXPORT_PC() @ req'd for init, resolve, alloc 991 cmp r0, #0 @ already resolved? 992 beq .LOP_NEW_INSTANCE_resolve @ no, resolve it now 993.LOP_NEW_INSTANCE_resolved: @ r0=class 994 ldrb r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum 995 cmp r1, #CLASS_INITIALIZED @ has class been initialized? 996 bne .LOP_NEW_INSTANCE_needinit @ no, init class now 997.LOP_NEW_INSTANCE_initialized: @ r0=class 998 mov r1, #ALLOC_DONT_TRACK @ flags for alloc call 999 bl dvmAllocObject @ r0<- new object 1000 b .LOP_NEW_INSTANCE_finish @ continue 1001 1002/* ------------------------------ */ 1003 .balign 64 1004.L_OP_NEW_ARRAY: /* 0x23 */ 1005/* File: armv5te/OP_NEW_ARRAY.S */ 1006 /* 1007 * Allocate an array of objects, specified with the array class 1008 * and a count. 1009 * 1010 * The verifier guarantees that this is an array class, so we don't 1011 * check for it here. 1012 */ 1013 /* new-array vA, vB, class@CCCC */ 1014 mov r0, rINST, lsr #12 @ r0<- B 1015 FETCH(r2, 1) @ r2<- CCCC 1016 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1017 GET_VREG(r1, r0) @ r1<- vB (array length) 1018 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1019 cmp r1, #0 @ check length 1020 ldr r0, [r3, r2, lsl #2] @ r0<- resolved class 1021 bmi common_errNegativeArraySize @ negative length, bail 1022 cmp r0, #0 @ already resolved? 1023 EXPORT_PC() @ req'd for resolve, alloc 1024 bne .LOP_NEW_ARRAY_finish @ resolved, continue 1025 b .LOP_NEW_ARRAY_resolve @ do resolve now 1026 1027/* ------------------------------ */ 1028 .balign 64 1029.L_OP_FILLED_NEW_ARRAY: /* 0x24 */ 1030/* File: armv5te/OP_FILLED_NEW_ARRAY.S */ 1031 /* 1032 * Create a new array with elements filled from registers. 1033 * 1034 * for: filled-new-array, filled-new-array/range 1035 */ 1036 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 1037 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */ 1038 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1039 FETCH(r1, 1) @ r1<- BBBB 1040 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1041 EXPORT_PC() @ need for resolve and alloc 1042 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1043 mov r10, rINST, lsr #8 @ r10<- AA or BA 1044 cmp r0, #0 @ already resolved? 1045 bne .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on 10468: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 1047 mov r2, #0 @ r2<- false 1048 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 1049 bl dvmResolveClass @ r0<- call(clazz, ref) 1050 cmp r0, #0 @ got null? 1051 beq common_exceptionThrown @ yes, handle exception 1052 b .LOP_FILLED_NEW_ARRAY_continue 1053 1054/* ------------------------------ */ 1055 .balign 64 1056.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */ 1057/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */ 1058/* File: armv5te/OP_FILLED_NEW_ARRAY.S */ 1059 /* 1060 * Create a new array with elements filled from registers. 1061 * 1062 * for: filled-new-array, filled-new-array/range 1063 */ 1064 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 1065 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */ 1066 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1067 FETCH(r1, 1) @ r1<- BBBB 1068 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1069 EXPORT_PC() @ need for resolve and alloc 1070 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1071 mov r10, rINST, lsr #8 @ r10<- AA or BA 1072 cmp r0, #0 @ already resolved? 1073 bne .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on 10748: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 1075 mov r2, #0 @ r2<- false 1076 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 1077 bl dvmResolveClass @ r0<- call(clazz, ref) 1078 cmp r0, #0 @ got null? 1079 beq common_exceptionThrown @ yes, handle exception 1080 b .LOP_FILLED_NEW_ARRAY_RANGE_continue 1081 1082 1083/* ------------------------------ */ 1084 .balign 64 1085.L_OP_FILL_ARRAY_DATA: /* 0x26 */ 1086/* File: armv5te/OP_FILL_ARRAY_DATA.S */ 1087 /* fill-array-data vAA, +BBBBBBBB */ 1088 FETCH(r0, 1) @ r0<- bbbb (lo) 1089 FETCH(r1, 2) @ r1<- BBBB (hi) 1090 mov r3, rINST, lsr #8 @ r3<- AA 1091 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 1092 GET_VREG(r0, r3) @ r0<- vAA (array object) 1093 add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.) 1094 EXPORT_PC(); 1095 bl dvmInterpHandleFillArrayData@ fill the array with predefined data 1096 cmp r0, #0 @ 0 means an exception is thrown 1097 beq common_exceptionThrown @ has exception 1098 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 1099 GET_INST_OPCODE(ip) @ extract opcode from rINST 1100 GOTO_OPCODE(ip) @ jump to next instruction 1101 1102/* ------------------------------ */ 1103 .balign 64 1104.L_OP_THROW: /* 0x27 */ 1105/* File: armv5te/OP_THROW.S */ 1106 /* 1107 * Throw an exception object in the current thread. 1108 */ 1109 /* throw vAA */ 1110 mov r2, rINST, lsr #8 @ r2<- AA 1111 GET_VREG(r1, r2) @ r1<- vAA (exception object) 1112 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 1113 EXPORT_PC() @ exception handler can throw 1114 cmp r1, #0 @ null object? 1115 beq common_errNullObject @ yes, throw an NPE instead 1116 @ bypass dvmSetException, just store it 1117 str r1, [r0, #offThread_exception] @ thread->exception<- obj 1118 b common_exceptionThrown 1119 1120/* ------------------------------ */ 1121 .balign 64 1122.L_OP_GOTO: /* 0x28 */ 1123/* File: armv5te/OP_GOTO.S */ 1124 /* 1125 * Unconditional branch, 8-bit offset. 1126 * 1127 * The branch distance is a signed code-unit offset, which we need to 1128 * double to get a byte offset. 1129 */ 1130 /* goto +AA */ 1131 mov r0, rINST, lsl #16 @ r0<- AAxx0000 1132 movs r9, r0, asr #24 @ r9<- ssssssAA (sign-extended) 1133 mov r9, r9, lsl #1 @ r9<- byte offset 1134 bmi common_backwardBranch @ backward branch, do periodic checks 1135#if defined(WITH_JIT) 1136 GET_JIT_PROF_TABLE(r0) 1137 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1138 cmp r0,#0 1139 bne common_updateProfile 1140 GET_INST_OPCODE(ip) @ extract opcode from rINST 1141 GOTO_OPCODE(ip) @ jump to next instruction 1142#else 1143 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1144 GET_INST_OPCODE(ip) @ extract opcode from rINST 1145 GOTO_OPCODE(ip) @ jump to next instruction 1146#endif 1147 1148/* ------------------------------ */ 1149 .balign 64 1150.L_OP_GOTO_16: /* 0x29 */ 1151/* File: armv5te/OP_GOTO_16.S */ 1152 /* 1153 * Unconditional branch, 16-bit offset. 1154 * 1155 * The branch distance is a signed code-unit offset, which we need to 1156 * double to get a byte offset. 1157 */ 1158 /* goto/16 +AAAA */ 1159 FETCH_S(r0, 1) @ r0<- ssssAAAA (sign-extended) 1160 movs r9, r0, asl #1 @ r9<- byte offset, check sign 1161 bmi common_backwardBranch @ backward branch, do periodic checks 1162#if defined(WITH_JIT) 1163 GET_JIT_PROF_TABLE(r0) 1164 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1165 cmp r0,#0 1166 bne common_updateProfile 1167 GET_INST_OPCODE(ip) @ extract opcode from rINST 1168 GOTO_OPCODE(ip) @ jump to next instruction 1169#else 1170 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1171 GET_INST_OPCODE(ip) @ extract opcode from rINST 1172 GOTO_OPCODE(ip) @ jump to next instruction 1173#endif 1174 1175/* ------------------------------ */ 1176 .balign 64 1177.L_OP_GOTO_32: /* 0x2a */ 1178/* File: armv5te/OP_GOTO_32.S */ 1179 /* 1180 * Unconditional branch, 32-bit offset. 1181 * 1182 * The branch distance is a signed code-unit offset, which we need to 1183 * double to get a byte offset. 1184 * 1185 * Unlike most opcodes, this one is allowed to branch to itself, so 1186 * our "backward branch" test must be "<=0" instead of "<0". The ORRS 1187 * instruction doesn't affect the V flag, so we need to clear it 1188 * explicitly. 1189 */ 1190 /* goto/32 +AAAAAAAA */ 1191 FETCH(r0, 1) @ r0<- aaaa (lo) 1192 FETCH(r1, 2) @ r1<- AAAA (hi) 1193 cmp ip, ip @ (clear V flag during stall) 1194 orrs r0, r0, r1, lsl #16 @ r0<- AAAAaaaa, check sign 1195 mov r9, r0, asl #1 @ r9<- byte offset 1196 ble common_backwardBranch @ backward branch, do periodic checks 1197#if defined(WITH_JIT) 1198 GET_JIT_PROF_TABLE(r0) 1199 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1200 cmp r0,#0 1201 bne common_updateProfile 1202 GET_INST_OPCODE(ip) @ extract opcode from rINST 1203 GOTO_OPCODE(ip) @ jump to next instruction 1204#else 1205 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1206 GET_INST_OPCODE(ip) @ extract opcode from rINST 1207 GOTO_OPCODE(ip) @ jump to next instruction 1208#endif 1209 1210/* ------------------------------ */ 1211 .balign 64 1212.L_OP_PACKED_SWITCH: /* 0x2b */ 1213/* File: armv5te/OP_PACKED_SWITCH.S */ 1214 /* 1215 * Handle a packed-switch or sparse-switch instruction. In both cases 1216 * we decode it and hand it off to a helper function. 1217 * 1218 * We don't really expect backward branches in a switch statement, but 1219 * they're perfectly legal, so we check for them here. 1220 * 1221 * for: packed-switch, sparse-switch 1222 */ 1223 /* op vAA, +BBBB */ 1224 FETCH(r0, 1) @ r0<- bbbb (lo) 1225 FETCH(r1, 2) @ r1<- BBBB (hi) 1226 mov r3, rINST, lsr #8 @ r3<- AA 1227 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 1228 GET_VREG(r1, r3) @ r1<- vAA 1229 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2 1230 bl dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset 1231 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign 1232 bmi common_backwardBranch @ backward branch, do periodic checks 1233 beq common_backwardBranch @ (want to use BLE but V is unknown) 1234#if defined(WITH_JIT) 1235 GET_JIT_PROF_TABLE(r0) 1236 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1237 cmp r0,#0 1238 bne common_updateProfile 1239 GET_INST_OPCODE(ip) @ extract opcode from rINST 1240 GOTO_OPCODE(ip) @ jump to next instruction 1241#else 1242 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1243 GET_INST_OPCODE(ip) @ extract opcode from rINST 1244 GOTO_OPCODE(ip) @ jump to next instruction 1245#endif 1246 1247/* ------------------------------ */ 1248 .balign 64 1249.L_OP_SPARSE_SWITCH: /* 0x2c */ 1250/* File: armv5te/OP_SPARSE_SWITCH.S */ 1251/* File: armv5te/OP_PACKED_SWITCH.S */ 1252 /* 1253 * Handle a packed-switch or sparse-switch instruction. In both cases 1254 * we decode it and hand it off to a helper function. 1255 * 1256 * We don't really expect backward branches in a switch statement, but 1257 * they're perfectly legal, so we check for them here. 1258 * 1259 * for: packed-switch, sparse-switch 1260 */ 1261 /* op vAA, +BBBB */ 1262 FETCH(r0, 1) @ r0<- bbbb (lo) 1263 FETCH(r1, 2) @ r1<- BBBB (hi) 1264 mov r3, rINST, lsr #8 @ r3<- AA 1265 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 1266 GET_VREG(r1, r3) @ r1<- vAA 1267 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2 1268 bl dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset 1269 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign 1270 bmi common_backwardBranch @ backward branch, do periodic checks 1271 beq common_backwardBranch @ (want to use BLE but V is unknown) 1272#if defined(WITH_JIT) 1273 GET_JIT_PROF_TABLE(r0) 1274 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1275 cmp r0,#0 1276 bne common_updateProfile 1277 GET_INST_OPCODE(ip) @ extract opcode from rINST 1278 GOTO_OPCODE(ip) @ jump to next instruction 1279#else 1280 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1281 GET_INST_OPCODE(ip) @ extract opcode from rINST 1282 GOTO_OPCODE(ip) @ jump to next instruction 1283#endif 1284 1285 1286/* ------------------------------ */ 1287 .balign 64 1288.L_OP_CMPL_FLOAT: /* 0x2d */ 1289/* File: arm-vfp/OP_CMPL_FLOAT.S */ 1290 /* 1291 * Compare two floating-point values. Puts 0, 1, or -1 into the 1292 * destination register based on the results of the comparison. 1293 * 1294 * int compare(x, y) { 1295 * if (x == y) { 1296 * return 0; 1297 * } else if (x > y) { 1298 * return 1; 1299 * } else if (x < y) { 1300 * return -1; 1301 * } else { 1302 * return -1; 1303 * } 1304 * } 1305 */ 1306 /* op vAA, vBB, vCC */ 1307 FETCH(r0, 1) @ r0<- CCBB 1308 mov r9, rINST, lsr #8 @ r9<- AA 1309 and r2, r0, #255 @ r2<- BB 1310 mov r3, r0, lsr #8 @ r3<- CC 1311 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1312 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1313 flds s0, [r2] @ s0<- vBB 1314 flds s1, [r3] @ s1<- vCC 1315 fcmpes s0, s1 @ compare (vBB, vCC) 1316 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1317 mvn r0, #0 @ r0<- -1 (default) 1318 GET_INST_OPCODE(ip) @ extract opcode from rINST 1319 fmstat @ export status flags 1320 movgt r0, #1 @ (greater than) r1<- 1 1321 moveq r0, #0 @ (equal) r1<- 0 1322 b .LOP_CMPL_FLOAT_finish @ argh 1323 1324 1325/* ------------------------------ */ 1326 .balign 64 1327.L_OP_CMPG_FLOAT: /* 0x2e */ 1328/* File: arm-vfp/OP_CMPG_FLOAT.S */ 1329 /* 1330 * Compare two floating-point values. Puts 0, 1, or -1 into the 1331 * destination register based on the results of the comparison. 1332 * 1333 * int compare(x, y) { 1334 * if (x == y) { 1335 * return 0; 1336 * } else if (x < y) { 1337 * return -1; 1338 * } else if (x > y) { 1339 * return 1; 1340 * } else { 1341 * return 1; 1342 * } 1343 * } 1344 */ 1345 /* op vAA, vBB, vCC */ 1346 FETCH(r0, 1) @ r0<- CCBB 1347 mov r9, rINST, lsr #8 @ r9<- AA 1348 and r2, r0, #255 @ r2<- BB 1349 mov r3, r0, lsr #8 @ r3<- CC 1350 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1351 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1352 flds s0, [r2] @ s0<- vBB 1353 flds s1, [r3] @ s1<- vCC 1354 fcmpes s0, s1 @ compare (vBB, vCC) 1355 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1356 mov r0, #1 @ r0<- 1 (default) 1357 GET_INST_OPCODE(ip) @ extract opcode from rINST 1358 fmstat @ export status flags 1359 mvnmi r0, #0 @ (less than) r1<- -1 1360 moveq r0, #0 @ (equal) r1<- 0 1361 b .LOP_CMPG_FLOAT_finish @ argh 1362 1363 1364/* ------------------------------ */ 1365 .balign 64 1366.L_OP_CMPL_DOUBLE: /* 0x2f */ 1367/* File: arm-vfp/OP_CMPL_DOUBLE.S */ 1368 /* 1369 * Compare two floating-point values. Puts 0, 1, or -1 into the 1370 * destination register based on the results of the comparison. 1371 * 1372 * int compare(x, y) { 1373 * if (x == y) { 1374 * return 0; 1375 * } else if (x > y) { 1376 * return 1; 1377 * } else if (x < y) { 1378 * return -1; 1379 * } else { 1380 * return -1; 1381 * } 1382 * } 1383 */ 1384 /* op vAA, vBB, vCC */ 1385 FETCH(r0, 1) @ r0<- CCBB 1386 mov r9, rINST, lsr #8 @ r9<- AA 1387 and r2, r0, #255 @ r2<- BB 1388 mov r3, r0, lsr #8 @ r3<- CC 1389 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1390 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1391 fldd d0, [r2] @ d0<- vBB 1392 fldd d1, [r3] @ d1<- vCC 1393 fcmped d0, d1 @ compare (vBB, vCC) 1394 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1395 mvn r0, #0 @ r0<- -1 (default) 1396 GET_INST_OPCODE(ip) @ extract opcode from rINST 1397 fmstat @ export status flags 1398 movgt r0, #1 @ (greater than) r1<- 1 1399 moveq r0, #0 @ (equal) r1<- 0 1400 b .LOP_CMPL_DOUBLE_finish @ argh 1401 1402 1403/* ------------------------------ */ 1404 .balign 64 1405.L_OP_CMPG_DOUBLE: /* 0x30 */ 1406/* File: arm-vfp/OP_CMPG_DOUBLE.S */ 1407 /* 1408 * Compare two floating-point values. Puts 0, 1, or -1 into the 1409 * destination register based on the results of the comparison. 1410 * 1411 * int compare(x, y) { 1412 * if (x == y) { 1413 * return 0; 1414 * } else if (x < y) { 1415 * return -1; 1416 * } else if (x > y) { 1417 * return 1; 1418 * } else { 1419 * return 1; 1420 * } 1421 * } 1422 */ 1423 /* op vAA, vBB, vCC */ 1424 FETCH(r0, 1) @ r0<- CCBB 1425 mov r9, rINST, lsr #8 @ r9<- AA 1426 and r2, r0, #255 @ r2<- BB 1427 mov r3, r0, lsr #8 @ r3<- CC 1428 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1429 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1430 fldd d0, [r2] @ d0<- vBB 1431 fldd d1, [r3] @ d1<- vCC 1432 fcmped d0, d1 @ compare (vBB, vCC) 1433 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1434 mov r0, #1 @ r0<- 1 (default) 1435 GET_INST_OPCODE(ip) @ extract opcode from rINST 1436 fmstat @ export status flags 1437 mvnmi r0, #0 @ (less than) r1<- -1 1438 moveq r0, #0 @ (equal) r1<- 0 1439 b .LOP_CMPG_DOUBLE_finish @ argh 1440 1441 1442/* ------------------------------ */ 1443 .balign 64 1444.L_OP_CMP_LONG: /* 0x31 */ 1445/* File: armv5te/OP_CMP_LONG.S */ 1446 /* 1447 * Compare two 64-bit values. Puts 0, 1, or -1 into the destination 1448 * register based on the results of the comparison. 1449 * 1450 * We load the full values with LDM, but in practice many values could 1451 * be resolved by only looking at the high word. This could be made 1452 * faster or slower by splitting the LDM into a pair of LDRs. 1453 * 1454 * If we just wanted to set condition flags, we could do this: 1455 * subs ip, r0, r2 1456 * sbcs ip, r1, r3 1457 * subeqs ip, r0, r2 1458 * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific 1459 * integer value, which we can do with 2 conditional mov/mvn instructions 1460 * (set 1, set -1; if they're equal we already have 0 in ip), giving 1461 * us a constant 5-cycle path plus a branch at the end to the 1462 * instruction epilogue code. The multi-compare approach below needs 1463 * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch 1464 * in the worst case (the 64-bit values are equal). 1465 */ 1466 /* cmp-long vAA, vBB, vCC */ 1467 FETCH(r0, 1) @ r0<- CCBB 1468 mov r9, rINST, lsr #8 @ r9<- AA 1469 and r2, r0, #255 @ r2<- BB 1470 mov r3, r0, lsr #8 @ r3<- CC 1471 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 1472 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 1473 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 1474 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 1475 cmp r1, r3 @ compare (vBB+1, vCC+1) 1476 blt .LOP_CMP_LONG_less @ signed compare on high part 1477 bgt .LOP_CMP_LONG_greater 1478 subs r1, r0, r2 @ r1<- r0 - r2 1479 bhi .LOP_CMP_LONG_greater @ unsigned compare on low part 1480 bne .LOP_CMP_LONG_less 1481 b .LOP_CMP_LONG_finish @ equal; r1 already holds 0 1482 1483/* ------------------------------ */ 1484 .balign 64 1485.L_OP_IF_EQ: /* 0x32 */ 1486/* File: armv6t2/OP_IF_EQ.S */ 1487/* File: armv6t2/bincmp.S */ 1488 /* 1489 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1490 * fragment that specifies the *reverse* comparison to perform, e.g. 1491 * for "if-le" you would use "gt". 1492 * 1493 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1494 */ 1495 /* if-cmp vA, vB, +CCCC */ 1496 mov r1, rINST, lsr #12 @ r1<- B 1497 ubfx r0, rINST, #8, #4 @ r0<- A 1498 GET_VREG(r3, r1) @ r3<- vB 1499 GET_VREG(r2, r0) @ r2<- vA 1500 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1501 cmp r2, r3 @ compare (vA, vB) 1502 bne 1f @ branch to 1 if comparison failed 1503 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1504 movs r9, r9, asl #1 @ convert to bytes, check sign 1505 bmi common_backwardBranch @ yes, do periodic checks 15061: 1507#if defined(WITH_JIT) 1508 GET_JIT_PROF_TABLE(r0) 1509 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1510 b common_testUpdateProfile 1511#else 1512 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1513 GET_INST_OPCODE(ip) @ extract opcode from rINST 1514 GOTO_OPCODE(ip) @ jump to next instruction 1515#endif 1516 1517 1518/* ------------------------------ */ 1519 .balign 64 1520.L_OP_IF_NE: /* 0x33 */ 1521/* File: armv6t2/OP_IF_NE.S */ 1522/* File: armv6t2/bincmp.S */ 1523 /* 1524 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1525 * fragment that specifies the *reverse* comparison to perform, e.g. 1526 * for "if-le" you would use "gt". 1527 * 1528 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1529 */ 1530 /* if-cmp vA, vB, +CCCC */ 1531 mov r1, rINST, lsr #12 @ r1<- B 1532 ubfx r0, rINST, #8, #4 @ r0<- A 1533 GET_VREG(r3, r1) @ r3<- vB 1534 GET_VREG(r2, r0) @ r2<- vA 1535 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1536 cmp r2, r3 @ compare (vA, vB) 1537 beq 1f @ branch to 1 if comparison failed 1538 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1539 movs r9, r9, asl #1 @ convert to bytes, check sign 1540 bmi common_backwardBranch @ yes, do periodic checks 15411: 1542#if defined(WITH_JIT) 1543 GET_JIT_PROF_TABLE(r0) 1544 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1545 b common_testUpdateProfile 1546#else 1547 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1548 GET_INST_OPCODE(ip) @ extract opcode from rINST 1549 GOTO_OPCODE(ip) @ jump to next instruction 1550#endif 1551 1552 1553/* ------------------------------ */ 1554 .balign 64 1555.L_OP_IF_LT: /* 0x34 */ 1556/* File: armv6t2/OP_IF_LT.S */ 1557/* File: armv6t2/bincmp.S */ 1558 /* 1559 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1560 * fragment that specifies the *reverse* comparison to perform, e.g. 1561 * for "if-le" you would use "gt". 1562 * 1563 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1564 */ 1565 /* if-cmp vA, vB, +CCCC */ 1566 mov r1, rINST, lsr #12 @ r1<- B 1567 ubfx r0, rINST, #8, #4 @ r0<- A 1568 GET_VREG(r3, r1) @ r3<- vB 1569 GET_VREG(r2, r0) @ r2<- vA 1570 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1571 cmp r2, r3 @ compare (vA, vB) 1572 bge 1f @ branch to 1 if comparison failed 1573 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1574 movs r9, r9, asl #1 @ convert to bytes, check sign 1575 bmi common_backwardBranch @ yes, do periodic checks 15761: 1577#if defined(WITH_JIT) 1578 GET_JIT_PROF_TABLE(r0) 1579 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1580 b common_testUpdateProfile 1581#else 1582 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1583 GET_INST_OPCODE(ip) @ extract opcode from rINST 1584 GOTO_OPCODE(ip) @ jump to next instruction 1585#endif 1586 1587 1588/* ------------------------------ */ 1589 .balign 64 1590.L_OP_IF_GE: /* 0x35 */ 1591/* File: armv6t2/OP_IF_GE.S */ 1592/* File: armv6t2/bincmp.S */ 1593 /* 1594 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1595 * fragment that specifies the *reverse* comparison to perform, e.g. 1596 * for "if-le" you would use "gt". 1597 * 1598 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1599 */ 1600 /* if-cmp vA, vB, +CCCC */ 1601 mov r1, rINST, lsr #12 @ r1<- B 1602 ubfx r0, rINST, #8, #4 @ r0<- A 1603 GET_VREG(r3, r1) @ r3<- vB 1604 GET_VREG(r2, r0) @ r2<- vA 1605 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1606 cmp r2, r3 @ compare (vA, vB) 1607 blt 1f @ branch to 1 if comparison failed 1608 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1609 movs r9, r9, asl #1 @ convert to bytes, check sign 1610 bmi common_backwardBranch @ yes, do periodic checks 16111: 1612#if defined(WITH_JIT) 1613 GET_JIT_PROF_TABLE(r0) 1614 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1615 b common_testUpdateProfile 1616#else 1617 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1618 GET_INST_OPCODE(ip) @ extract opcode from rINST 1619 GOTO_OPCODE(ip) @ jump to next instruction 1620#endif 1621 1622 1623/* ------------------------------ */ 1624 .balign 64 1625.L_OP_IF_GT: /* 0x36 */ 1626/* File: armv6t2/OP_IF_GT.S */ 1627/* File: armv6t2/bincmp.S */ 1628 /* 1629 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1630 * fragment that specifies the *reverse* comparison to perform, e.g. 1631 * for "if-le" you would use "gt". 1632 * 1633 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1634 */ 1635 /* if-cmp vA, vB, +CCCC */ 1636 mov r1, rINST, lsr #12 @ r1<- B 1637 ubfx r0, rINST, #8, #4 @ r0<- A 1638 GET_VREG(r3, r1) @ r3<- vB 1639 GET_VREG(r2, r0) @ r2<- vA 1640 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1641 cmp r2, r3 @ compare (vA, vB) 1642 ble 1f @ branch to 1 if comparison failed 1643 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1644 movs r9, r9, asl #1 @ convert to bytes, check sign 1645 bmi common_backwardBranch @ yes, do periodic checks 16461: 1647#if defined(WITH_JIT) 1648 GET_JIT_PROF_TABLE(r0) 1649 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1650 b common_testUpdateProfile 1651#else 1652 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1653 GET_INST_OPCODE(ip) @ extract opcode from rINST 1654 GOTO_OPCODE(ip) @ jump to next instruction 1655#endif 1656 1657 1658/* ------------------------------ */ 1659 .balign 64 1660.L_OP_IF_LE: /* 0x37 */ 1661/* File: armv6t2/OP_IF_LE.S */ 1662/* File: armv6t2/bincmp.S */ 1663 /* 1664 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1665 * fragment that specifies the *reverse* comparison to perform, e.g. 1666 * for "if-le" you would use "gt". 1667 * 1668 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1669 */ 1670 /* if-cmp vA, vB, +CCCC */ 1671 mov r1, rINST, lsr #12 @ r1<- B 1672 ubfx r0, rINST, #8, #4 @ r0<- A 1673 GET_VREG(r3, r1) @ r3<- vB 1674 GET_VREG(r2, r0) @ r2<- vA 1675 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1676 cmp r2, r3 @ compare (vA, vB) 1677 bgt 1f @ branch to 1 if comparison failed 1678 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1679 movs r9, r9, asl #1 @ convert to bytes, check sign 1680 bmi common_backwardBranch @ yes, do periodic checks 16811: 1682#if defined(WITH_JIT) 1683 GET_JIT_PROF_TABLE(r0) 1684 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1685 b common_testUpdateProfile 1686#else 1687 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1688 GET_INST_OPCODE(ip) @ extract opcode from rINST 1689 GOTO_OPCODE(ip) @ jump to next instruction 1690#endif 1691 1692 1693/* ------------------------------ */ 1694 .balign 64 1695.L_OP_IF_EQZ: /* 0x38 */ 1696/* File: armv5te/OP_IF_EQZ.S */ 1697/* File: armv5te/zcmp.S */ 1698 /* 1699 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1700 * fragment that specifies the *reverse* comparison to perform, e.g. 1701 * for "if-le" you would use "gt". 1702 * 1703 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1704 */ 1705 /* if-cmp vAA, +BBBB */ 1706 mov r0, rINST, lsr #8 @ r0<- AA 1707 GET_VREG(r2, r0) @ r2<- vAA 1708 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1709 cmp r2, #0 @ compare (vA, 0) 1710 bne 1f @ branch to 1 if comparison failed 1711 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1712 movs r9, r9, asl #1 @ convert to bytes, check sign 1713 bmi common_backwardBranch @ backward branch, do periodic checks 17141: 1715#if defined(WITH_JIT) 1716 GET_JIT_PROF_TABLE(r0) 1717 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1718 cmp r0,#0 1719 bne common_updateProfile 1720 GET_INST_OPCODE(ip) @ extract opcode from rINST 1721 GOTO_OPCODE(ip) @ jump to next instruction 1722#else 1723 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1724 GET_INST_OPCODE(ip) @ extract opcode from rINST 1725 GOTO_OPCODE(ip) @ jump to next instruction 1726#endif 1727 1728 1729/* ------------------------------ */ 1730 .balign 64 1731.L_OP_IF_NEZ: /* 0x39 */ 1732/* File: armv5te/OP_IF_NEZ.S */ 1733/* File: armv5te/zcmp.S */ 1734 /* 1735 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1736 * fragment that specifies the *reverse* comparison to perform, e.g. 1737 * for "if-le" you would use "gt". 1738 * 1739 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1740 */ 1741 /* if-cmp vAA, +BBBB */ 1742 mov r0, rINST, lsr #8 @ r0<- AA 1743 GET_VREG(r2, r0) @ r2<- vAA 1744 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1745 cmp r2, #0 @ compare (vA, 0) 1746 beq 1f @ branch to 1 if comparison failed 1747 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1748 movs r9, r9, asl #1 @ convert to bytes, check sign 1749 bmi common_backwardBranch @ backward branch, do periodic checks 17501: 1751#if defined(WITH_JIT) 1752 GET_JIT_PROF_TABLE(r0) 1753 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1754 cmp r0,#0 1755 bne common_updateProfile 1756 GET_INST_OPCODE(ip) @ extract opcode from rINST 1757 GOTO_OPCODE(ip) @ jump to next instruction 1758#else 1759 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1760 GET_INST_OPCODE(ip) @ extract opcode from rINST 1761 GOTO_OPCODE(ip) @ jump to next instruction 1762#endif 1763 1764 1765/* ------------------------------ */ 1766 .balign 64 1767.L_OP_IF_LTZ: /* 0x3a */ 1768/* File: armv5te/OP_IF_LTZ.S */ 1769/* File: armv5te/zcmp.S */ 1770 /* 1771 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1772 * fragment that specifies the *reverse* comparison to perform, e.g. 1773 * for "if-le" you would use "gt". 1774 * 1775 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1776 */ 1777 /* if-cmp vAA, +BBBB */ 1778 mov r0, rINST, lsr #8 @ r0<- AA 1779 GET_VREG(r2, r0) @ r2<- vAA 1780 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1781 cmp r2, #0 @ compare (vA, 0) 1782 bge 1f @ branch to 1 if comparison failed 1783 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1784 movs r9, r9, asl #1 @ convert to bytes, check sign 1785 bmi common_backwardBranch @ backward branch, do periodic checks 17861: 1787#if defined(WITH_JIT) 1788 GET_JIT_PROF_TABLE(r0) 1789 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1790 cmp r0,#0 1791 bne common_updateProfile 1792 GET_INST_OPCODE(ip) @ extract opcode from rINST 1793 GOTO_OPCODE(ip) @ jump to next instruction 1794#else 1795 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1796 GET_INST_OPCODE(ip) @ extract opcode from rINST 1797 GOTO_OPCODE(ip) @ jump to next instruction 1798#endif 1799 1800 1801/* ------------------------------ */ 1802 .balign 64 1803.L_OP_IF_GEZ: /* 0x3b */ 1804/* File: armv5te/OP_IF_GEZ.S */ 1805/* File: armv5te/zcmp.S */ 1806 /* 1807 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1808 * fragment that specifies the *reverse* comparison to perform, e.g. 1809 * for "if-le" you would use "gt". 1810 * 1811 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1812 */ 1813 /* if-cmp vAA, +BBBB */ 1814 mov r0, rINST, lsr #8 @ r0<- AA 1815 GET_VREG(r2, r0) @ r2<- vAA 1816 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1817 cmp r2, #0 @ compare (vA, 0) 1818 blt 1f @ branch to 1 if comparison failed 1819 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1820 movs r9, r9, asl #1 @ convert to bytes, check sign 1821 bmi common_backwardBranch @ backward branch, do periodic checks 18221: 1823#if defined(WITH_JIT) 1824 GET_JIT_PROF_TABLE(r0) 1825 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1826 cmp r0,#0 1827 bne common_updateProfile 1828 GET_INST_OPCODE(ip) @ extract opcode from rINST 1829 GOTO_OPCODE(ip) @ jump to next instruction 1830#else 1831 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1832 GET_INST_OPCODE(ip) @ extract opcode from rINST 1833 GOTO_OPCODE(ip) @ jump to next instruction 1834#endif 1835 1836 1837/* ------------------------------ */ 1838 .balign 64 1839.L_OP_IF_GTZ: /* 0x3c */ 1840/* File: armv5te/OP_IF_GTZ.S */ 1841/* File: armv5te/zcmp.S */ 1842 /* 1843 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1844 * fragment that specifies the *reverse* comparison to perform, e.g. 1845 * for "if-le" you would use "gt". 1846 * 1847 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1848 */ 1849 /* if-cmp vAA, +BBBB */ 1850 mov r0, rINST, lsr #8 @ r0<- AA 1851 GET_VREG(r2, r0) @ r2<- vAA 1852 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1853 cmp r2, #0 @ compare (vA, 0) 1854 ble 1f @ branch to 1 if comparison failed 1855 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1856 movs r9, r9, asl #1 @ convert to bytes, check sign 1857 bmi common_backwardBranch @ backward branch, do periodic checks 18581: 1859#if defined(WITH_JIT) 1860 GET_JIT_PROF_TABLE(r0) 1861 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1862 cmp r0,#0 1863 bne common_updateProfile 1864 GET_INST_OPCODE(ip) @ extract opcode from rINST 1865 GOTO_OPCODE(ip) @ jump to next instruction 1866#else 1867 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1868 GET_INST_OPCODE(ip) @ extract opcode from rINST 1869 GOTO_OPCODE(ip) @ jump to next instruction 1870#endif 1871 1872 1873/* ------------------------------ */ 1874 .balign 64 1875.L_OP_IF_LEZ: /* 0x3d */ 1876/* File: armv5te/OP_IF_LEZ.S */ 1877/* File: armv5te/zcmp.S */ 1878 /* 1879 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1880 * fragment that specifies the *reverse* comparison to perform, e.g. 1881 * for "if-le" you would use "gt". 1882 * 1883 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1884 */ 1885 /* if-cmp vAA, +BBBB */ 1886 mov r0, rINST, lsr #8 @ r0<- AA 1887 GET_VREG(r2, r0) @ r2<- vAA 1888 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1889 cmp r2, #0 @ compare (vA, 0) 1890 bgt 1f @ branch to 1 if comparison failed 1891 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1892 movs r9, r9, asl #1 @ convert to bytes, check sign 1893 bmi common_backwardBranch @ backward branch, do periodic checks 18941: 1895#if defined(WITH_JIT) 1896 GET_JIT_PROF_TABLE(r0) 1897 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1898 cmp r0,#0 1899 bne common_updateProfile 1900 GET_INST_OPCODE(ip) @ extract opcode from rINST 1901 GOTO_OPCODE(ip) @ jump to next instruction 1902#else 1903 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1904 GET_INST_OPCODE(ip) @ extract opcode from rINST 1905 GOTO_OPCODE(ip) @ jump to next instruction 1906#endif 1907 1908 1909/* ------------------------------ */ 1910 .balign 64 1911.L_OP_UNUSED_3E: /* 0x3e */ 1912/* File: armv5te/OP_UNUSED_3E.S */ 1913/* File: armv5te/unused.S */ 1914 bl common_abort 1915 1916 1917/* ------------------------------ */ 1918 .balign 64 1919.L_OP_UNUSED_3F: /* 0x3f */ 1920/* File: armv5te/OP_UNUSED_3F.S */ 1921/* File: armv5te/unused.S */ 1922 bl common_abort 1923 1924 1925/* ------------------------------ */ 1926 .balign 64 1927.L_OP_UNUSED_40: /* 0x40 */ 1928/* File: armv5te/OP_UNUSED_40.S */ 1929/* File: armv5te/unused.S */ 1930 bl common_abort 1931 1932 1933/* ------------------------------ */ 1934 .balign 64 1935.L_OP_UNUSED_41: /* 0x41 */ 1936/* File: armv5te/OP_UNUSED_41.S */ 1937/* File: armv5te/unused.S */ 1938 bl common_abort 1939 1940 1941/* ------------------------------ */ 1942 .balign 64 1943.L_OP_UNUSED_42: /* 0x42 */ 1944/* File: armv5te/OP_UNUSED_42.S */ 1945/* File: armv5te/unused.S */ 1946 bl common_abort 1947 1948 1949/* ------------------------------ */ 1950 .balign 64 1951.L_OP_UNUSED_43: /* 0x43 */ 1952/* File: armv5te/OP_UNUSED_43.S */ 1953/* File: armv5te/unused.S */ 1954 bl common_abort 1955 1956 1957/* ------------------------------ */ 1958 .balign 64 1959.L_OP_AGET: /* 0x44 */ 1960/* File: armv5te/OP_AGET.S */ 1961 /* 1962 * Array get, 32 bits or less. vAA <- vBB[vCC]. 1963 * 1964 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 1965 * instructions. We use a pair of FETCH_Bs instead. 1966 * 1967 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 1968 */ 1969 /* op vAA, vBB, vCC */ 1970 FETCH_B(r2, 1, 0) @ r2<- BB 1971 mov r9, rINST, lsr #8 @ r9<- AA 1972 FETCH_B(r3, 1, 1) @ r3<- CC 1973 GET_VREG(r0, r2) @ r0<- vBB (array object) 1974 GET_VREG(r1, r3) @ r1<- vCC (requested index) 1975 cmp r0, #0 @ null array object? 1976 beq common_errNullObject @ yes, bail 1977 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 1978 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 1979 cmp r1, r3 @ compare unsigned index, length 1980 bcs common_errArrayIndex @ index >= length, bail 1981 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1982 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 1983 GET_INST_OPCODE(ip) @ extract opcode from rINST 1984 SET_VREG(r2, r9) @ vAA<- r2 1985 GOTO_OPCODE(ip) @ jump to next instruction 1986 1987/* ------------------------------ */ 1988 .balign 64 1989.L_OP_AGET_WIDE: /* 0x45 */ 1990/* File: armv5te/OP_AGET_WIDE.S */ 1991 /* 1992 * Array get, 64 bits. vAA <- vBB[vCC]. 1993 * 1994 * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD. 1995 */ 1996 /* aget-wide vAA, vBB, vCC */ 1997 FETCH(r0, 1) @ r0<- CCBB 1998 mov r9, rINST, lsr #8 @ r9<- AA 1999 and r2, r0, #255 @ r2<- BB 2000 mov r3, r0, lsr #8 @ r3<- CC 2001 GET_VREG(r0, r2) @ r0<- vBB (array object) 2002 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2003 cmp r0, #0 @ null array object? 2004 beq common_errNullObject @ yes, bail 2005 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2006 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2007 cmp r1, r3 @ compare unsigned index, length 2008 bcc .LOP_AGET_WIDE_finish @ okay, continue below 2009 b common_errArrayIndex @ index >= length, bail 2010 @ May want to swap the order of these two branches depending on how the 2011 @ branch prediction (if any) handles conditional forward branches vs. 2012 @ unconditional forward branches. 2013 2014/* ------------------------------ */ 2015 .balign 64 2016.L_OP_AGET_OBJECT: /* 0x46 */ 2017/* File: armv5te/OP_AGET_OBJECT.S */ 2018/* File: armv5te/OP_AGET.S */ 2019 /* 2020 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2021 * 2022 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2023 * instructions. We use a pair of FETCH_Bs instead. 2024 * 2025 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2026 */ 2027 /* op vAA, vBB, vCC */ 2028 FETCH_B(r2, 1, 0) @ r2<- BB 2029 mov r9, rINST, lsr #8 @ r9<- AA 2030 FETCH_B(r3, 1, 1) @ r3<- CC 2031 GET_VREG(r0, r2) @ r0<- vBB (array object) 2032 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2033 cmp r0, #0 @ null array object? 2034 beq common_errNullObject @ yes, bail 2035 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2036 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2037 cmp r1, r3 @ compare unsigned index, length 2038 bcs common_errArrayIndex @ index >= length, bail 2039 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2040 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2041 GET_INST_OPCODE(ip) @ extract opcode from rINST 2042 SET_VREG(r2, r9) @ vAA<- r2 2043 GOTO_OPCODE(ip) @ jump to next instruction 2044 2045 2046/* ------------------------------ */ 2047 .balign 64 2048.L_OP_AGET_BOOLEAN: /* 0x47 */ 2049/* File: armv5te/OP_AGET_BOOLEAN.S */ 2050/* File: armv5te/OP_AGET.S */ 2051 /* 2052 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2053 * 2054 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2055 * instructions. We use a pair of FETCH_Bs instead. 2056 * 2057 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2058 */ 2059 /* op vAA, vBB, vCC */ 2060 FETCH_B(r2, 1, 0) @ r2<- BB 2061 mov r9, rINST, lsr #8 @ r9<- AA 2062 FETCH_B(r3, 1, 1) @ r3<- CC 2063 GET_VREG(r0, r2) @ r0<- vBB (array object) 2064 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2065 cmp r0, #0 @ null array object? 2066 beq common_errNullObject @ yes, bail 2067 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2068 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2069 cmp r1, r3 @ compare unsigned index, length 2070 bcs common_errArrayIndex @ index >= length, bail 2071 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2072 ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2073 GET_INST_OPCODE(ip) @ extract opcode from rINST 2074 SET_VREG(r2, r9) @ vAA<- r2 2075 GOTO_OPCODE(ip) @ jump to next instruction 2076 2077 2078/* ------------------------------ */ 2079 .balign 64 2080.L_OP_AGET_BYTE: /* 0x48 */ 2081/* File: armv5te/OP_AGET_BYTE.S */ 2082/* File: armv5te/OP_AGET.S */ 2083 /* 2084 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2085 * 2086 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2087 * instructions. We use a pair of FETCH_Bs instead. 2088 * 2089 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2090 */ 2091 /* op vAA, vBB, vCC */ 2092 FETCH_B(r2, 1, 0) @ r2<- BB 2093 mov r9, rINST, lsr #8 @ r9<- AA 2094 FETCH_B(r3, 1, 1) @ r3<- CC 2095 GET_VREG(r0, r2) @ r0<- vBB (array object) 2096 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2097 cmp r0, #0 @ null array object? 2098 beq common_errNullObject @ yes, bail 2099 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2100 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2101 cmp r1, r3 @ compare unsigned index, length 2102 bcs common_errArrayIndex @ index >= length, bail 2103 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2104 ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2105 GET_INST_OPCODE(ip) @ extract opcode from rINST 2106 SET_VREG(r2, r9) @ vAA<- r2 2107 GOTO_OPCODE(ip) @ jump to next instruction 2108 2109 2110/* ------------------------------ */ 2111 .balign 64 2112.L_OP_AGET_CHAR: /* 0x49 */ 2113/* File: armv5te/OP_AGET_CHAR.S */ 2114/* File: armv5te/OP_AGET.S */ 2115 /* 2116 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2117 * 2118 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2119 * instructions. We use a pair of FETCH_Bs instead. 2120 * 2121 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2122 */ 2123 /* op vAA, vBB, vCC */ 2124 FETCH_B(r2, 1, 0) @ r2<- BB 2125 mov r9, rINST, lsr #8 @ r9<- AA 2126 FETCH_B(r3, 1, 1) @ r3<- CC 2127 GET_VREG(r0, r2) @ r0<- vBB (array object) 2128 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2129 cmp r0, #0 @ null array object? 2130 beq common_errNullObject @ yes, bail 2131 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2132 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2133 cmp r1, r3 @ compare unsigned index, length 2134 bcs common_errArrayIndex @ index >= length, bail 2135 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2136 ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2137 GET_INST_OPCODE(ip) @ extract opcode from rINST 2138 SET_VREG(r2, r9) @ vAA<- r2 2139 GOTO_OPCODE(ip) @ jump to next instruction 2140 2141 2142/* ------------------------------ */ 2143 .balign 64 2144.L_OP_AGET_SHORT: /* 0x4a */ 2145/* File: armv5te/OP_AGET_SHORT.S */ 2146/* File: armv5te/OP_AGET.S */ 2147 /* 2148 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2149 * 2150 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2151 * instructions. We use a pair of FETCH_Bs instead. 2152 * 2153 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2154 */ 2155 /* op vAA, vBB, vCC */ 2156 FETCH_B(r2, 1, 0) @ r2<- BB 2157 mov r9, rINST, lsr #8 @ r9<- AA 2158 FETCH_B(r3, 1, 1) @ r3<- CC 2159 GET_VREG(r0, r2) @ r0<- vBB (array object) 2160 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2161 cmp r0, #0 @ null array object? 2162 beq common_errNullObject @ yes, bail 2163 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2164 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2165 cmp r1, r3 @ compare unsigned index, length 2166 bcs common_errArrayIndex @ index >= length, bail 2167 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2168 ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2169 GET_INST_OPCODE(ip) @ extract opcode from rINST 2170 SET_VREG(r2, r9) @ vAA<- r2 2171 GOTO_OPCODE(ip) @ jump to next instruction 2172 2173 2174/* ------------------------------ */ 2175 .balign 64 2176.L_OP_APUT: /* 0x4b */ 2177/* File: armv5te/OP_APUT.S */ 2178 /* 2179 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2180 * 2181 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2182 * instructions. We use a pair of FETCH_Bs instead. 2183 * 2184 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2185 */ 2186 /* op vAA, vBB, vCC */ 2187 FETCH_B(r2, 1, 0) @ r2<- BB 2188 mov r9, rINST, lsr #8 @ r9<- AA 2189 FETCH_B(r3, 1, 1) @ r3<- CC 2190 GET_VREG(r0, r2) @ r0<- vBB (array object) 2191 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2192 cmp r0, #0 @ null array object? 2193 beq common_errNullObject @ yes, bail 2194 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2195 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2196 cmp r1, r3 @ compare unsigned index, length 2197 bcs common_errArrayIndex @ index >= length, bail 2198 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2199 GET_VREG(r2, r9) @ r2<- vAA 2200 GET_INST_OPCODE(ip) @ extract opcode from rINST 2201 str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2202 GOTO_OPCODE(ip) @ jump to next instruction 2203 2204/* ------------------------------ */ 2205 .balign 64 2206.L_OP_APUT_WIDE: /* 0x4c */ 2207/* File: armv5te/OP_APUT_WIDE.S */ 2208 /* 2209 * Array put, 64 bits. vBB[vCC] <- vAA. 2210 * 2211 * Arrays of long/double are 64-bit aligned, so it's okay to use STRD. 2212 */ 2213 /* aput-wide vAA, vBB, vCC */ 2214 FETCH(r0, 1) @ r0<- CCBB 2215 mov r9, rINST, lsr #8 @ r9<- AA 2216 and r2, r0, #255 @ r2<- BB 2217 mov r3, r0, lsr #8 @ r3<- CC 2218 GET_VREG(r0, r2) @ r0<- vBB (array object) 2219 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2220 cmp r0, #0 @ null array object? 2221 beq common_errNullObject @ yes, bail 2222 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2223 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2224 cmp r1, r3 @ compare unsigned index, length 2225 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2226 bcc .LOP_APUT_WIDE_finish @ okay, continue below 2227 b common_errArrayIndex @ index >= length, bail 2228 @ May want to swap the order of these two branches depending on how the 2229 @ branch prediction (if any) handles conditional forward branches vs. 2230 @ unconditional forward branches. 2231 2232/* ------------------------------ */ 2233 .balign 64 2234.L_OP_APUT_OBJECT: /* 0x4d */ 2235/* File: armv5te/OP_APUT_OBJECT.S */ 2236 /* 2237 * Store an object into an array. vBB[vCC] <- vAA. 2238 */ 2239 /* op vAA, vBB, vCC */ 2240 FETCH(r0, 1) @ r0<- CCBB 2241 mov r9, rINST, lsr #8 @ r9<- AA 2242 and r2, r0, #255 @ r2<- BB 2243 mov r3, r0, lsr #8 @ r3<- CC 2244 GET_VREG(rINST, r2) @ rINST<- vBB (array object) 2245 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2246 cmp rINST, #0 @ null array object? 2247 GET_VREG(r9, r9) @ r9<- vAA 2248 beq common_errNullObject @ yes, bail 2249 ldr r3, [rINST, #offArrayObject_length] @ r3<- arrayObj->length 2250 add r10, rINST, r1, lsl #2 @ r10<- arrayObj + index*width 2251 cmp r1, r3 @ compare unsigned index, length 2252 bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on 2253 b common_errArrayIndex @ index >= length, bail 2254 2255 2256/* ------------------------------ */ 2257 .balign 64 2258.L_OP_APUT_BOOLEAN: /* 0x4e */ 2259/* File: armv5te/OP_APUT_BOOLEAN.S */ 2260/* File: armv5te/OP_APUT.S */ 2261 /* 2262 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2263 * 2264 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2265 * instructions. We use a pair of FETCH_Bs instead. 2266 * 2267 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2268 */ 2269 /* op vAA, vBB, vCC */ 2270 FETCH_B(r2, 1, 0) @ r2<- BB 2271 mov r9, rINST, lsr #8 @ r9<- AA 2272 FETCH_B(r3, 1, 1) @ r3<- CC 2273 GET_VREG(r0, r2) @ r0<- vBB (array object) 2274 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2275 cmp r0, #0 @ null array object? 2276 beq common_errNullObject @ yes, bail 2277 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2278 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2279 cmp r1, r3 @ compare unsigned index, length 2280 bcs common_errArrayIndex @ index >= length, bail 2281 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2282 GET_VREG(r2, r9) @ r2<- vAA 2283 GET_INST_OPCODE(ip) @ extract opcode from rINST 2284 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2285 GOTO_OPCODE(ip) @ jump to next instruction 2286 2287 2288/* ------------------------------ */ 2289 .balign 64 2290.L_OP_APUT_BYTE: /* 0x4f */ 2291/* File: armv5te/OP_APUT_BYTE.S */ 2292/* File: armv5te/OP_APUT.S */ 2293 /* 2294 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2295 * 2296 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2297 * instructions. We use a pair of FETCH_Bs instead. 2298 * 2299 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2300 */ 2301 /* op vAA, vBB, vCC */ 2302 FETCH_B(r2, 1, 0) @ r2<- BB 2303 mov r9, rINST, lsr #8 @ r9<- AA 2304 FETCH_B(r3, 1, 1) @ r3<- CC 2305 GET_VREG(r0, r2) @ r0<- vBB (array object) 2306 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2307 cmp r0, #0 @ null array object? 2308 beq common_errNullObject @ yes, bail 2309 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2310 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2311 cmp r1, r3 @ compare unsigned index, length 2312 bcs common_errArrayIndex @ index >= length, bail 2313 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2314 GET_VREG(r2, r9) @ r2<- vAA 2315 GET_INST_OPCODE(ip) @ extract opcode from rINST 2316 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2317 GOTO_OPCODE(ip) @ jump to next instruction 2318 2319 2320/* ------------------------------ */ 2321 .balign 64 2322.L_OP_APUT_CHAR: /* 0x50 */ 2323/* File: armv5te/OP_APUT_CHAR.S */ 2324/* File: armv5te/OP_APUT.S */ 2325 /* 2326 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2327 * 2328 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2329 * instructions. We use a pair of FETCH_Bs instead. 2330 * 2331 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2332 */ 2333 /* op vAA, vBB, vCC */ 2334 FETCH_B(r2, 1, 0) @ r2<- BB 2335 mov r9, rINST, lsr #8 @ r9<- AA 2336 FETCH_B(r3, 1, 1) @ r3<- CC 2337 GET_VREG(r0, r2) @ r0<- vBB (array object) 2338 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2339 cmp r0, #0 @ null array object? 2340 beq common_errNullObject @ yes, bail 2341 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2342 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2343 cmp r1, r3 @ compare unsigned index, length 2344 bcs common_errArrayIndex @ index >= length, bail 2345 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2346 GET_VREG(r2, r9) @ r2<- vAA 2347 GET_INST_OPCODE(ip) @ extract opcode from rINST 2348 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2349 GOTO_OPCODE(ip) @ jump to next instruction 2350 2351 2352/* ------------------------------ */ 2353 .balign 64 2354.L_OP_APUT_SHORT: /* 0x51 */ 2355/* File: armv5te/OP_APUT_SHORT.S */ 2356/* File: armv5te/OP_APUT.S */ 2357 /* 2358 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2359 * 2360 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2361 * instructions. We use a pair of FETCH_Bs instead. 2362 * 2363 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2364 */ 2365 /* op vAA, vBB, vCC */ 2366 FETCH_B(r2, 1, 0) @ r2<- BB 2367 mov r9, rINST, lsr #8 @ r9<- AA 2368 FETCH_B(r3, 1, 1) @ r3<- CC 2369 GET_VREG(r0, r2) @ r0<- vBB (array object) 2370 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2371 cmp r0, #0 @ null array object? 2372 beq common_errNullObject @ yes, bail 2373 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2374 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2375 cmp r1, r3 @ compare unsigned index, length 2376 bcs common_errArrayIndex @ index >= length, bail 2377 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2378 GET_VREG(r2, r9) @ r2<- vAA 2379 GET_INST_OPCODE(ip) @ extract opcode from rINST 2380 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2381 GOTO_OPCODE(ip) @ jump to next instruction 2382 2383 2384/* ------------------------------ */ 2385 .balign 64 2386.L_OP_IGET: /* 0x52 */ 2387/* File: armv6t2/OP_IGET.S */ 2388 /* 2389 * General 32-bit instance field get. 2390 * 2391 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2392 */ 2393 /* op vA, vB, field@CCCC */ 2394 mov r0, rINST, lsr #12 @ r0<- B 2395 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2396 FETCH(r1, 1) @ r1<- field ref CCCC 2397 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2398 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2399 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2400 cmp r0, #0 @ is resolved entry null? 2401 bne .LOP_IGET_finish @ no, already resolved 24028: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2403 EXPORT_PC() @ resolve() could throw 2404 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2405 bl dvmResolveInstField @ r0<- resolved InstField ptr 2406 cmp r0, #0 2407 bne .LOP_IGET_finish 2408 b common_exceptionThrown 2409 2410/* ------------------------------ */ 2411 .balign 64 2412.L_OP_IGET_WIDE: /* 0x53 */ 2413/* File: armv6t2/OP_IGET_WIDE.S */ 2414 /* 2415 * Wide 32-bit instance field get. 2416 */ 2417 /* iget-wide vA, vB, field@CCCC */ 2418 mov r0, rINST, lsr #12 @ r0<- B 2419 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2420 FETCH(r1, 1) @ r1<- field ref CCCC 2421 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2422 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2423 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2424 cmp r0, #0 @ is resolved entry null? 2425 bne .LOP_IGET_WIDE_finish @ no, already resolved 24268: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2427 EXPORT_PC() @ resolve() could throw 2428 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2429 bl dvmResolveInstField @ r0<- resolved InstField ptr 2430 cmp r0, #0 2431 bne .LOP_IGET_WIDE_finish 2432 b common_exceptionThrown 2433 2434/* ------------------------------ */ 2435 .balign 64 2436.L_OP_IGET_OBJECT: /* 0x54 */ 2437/* File: armv5te/OP_IGET_OBJECT.S */ 2438/* File: armv5te/OP_IGET.S */ 2439 /* 2440 * General 32-bit instance field get. 2441 * 2442 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2443 */ 2444 /* op vA, vB, field@CCCC */ 2445 mov r0, rINST, lsr #12 @ r0<- B 2446 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2447 FETCH(r1, 1) @ r1<- field ref CCCC 2448 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2449 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2450 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2451 cmp r0, #0 @ is resolved entry null? 2452 bne .LOP_IGET_OBJECT_finish @ no, already resolved 24538: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2454 EXPORT_PC() @ resolve() could throw 2455 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2456 bl dvmResolveInstField @ r0<- resolved InstField ptr 2457 cmp r0, #0 2458 bne .LOP_IGET_OBJECT_finish 2459 b common_exceptionThrown 2460 2461 2462/* ------------------------------ */ 2463 .balign 64 2464.L_OP_IGET_BOOLEAN: /* 0x55 */ 2465/* File: armv5te/OP_IGET_BOOLEAN.S */ 2466@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" } 2467/* File: armv5te/OP_IGET.S */ 2468 /* 2469 * General 32-bit instance field get. 2470 * 2471 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2472 */ 2473 /* op vA, vB, field@CCCC */ 2474 mov r0, rINST, lsr #12 @ r0<- B 2475 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2476 FETCH(r1, 1) @ r1<- field ref CCCC 2477 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2478 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2479 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2480 cmp r0, #0 @ is resolved entry null? 2481 bne .LOP_IGET_BOOLEAN_finish @ no, already resolved 24828: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2483 EXPORT_PC() @ resolve() could throw 2484 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2485 bl dvmResolveInstField @ r0<- resolved InstField ptr 2486 cmp r0, #0 2487 bne .LOP_IGET_BOOLEAN_finish 2488 b common_exceptionThrown 2489 2490 2491/* ------------------------------ */ 2492 .balign 64 2493.L_OP_IGET_BYTE: /* 0x56 */ 2494/* File: armv5te/OP_IGET_BYTE.S */ 2495@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" } 2496/* File: armv5te/OP_IGET.S */ 2497 /* 2498 * General 32-bit instance field get. 2499 * 2500 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2501 */ 2502 /* op vA, vB, field@CCCC */ 2503 mov r0, rINST, lsr #12 @ r0<- B 2504 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2505 FETCH(r1, 1) @ r1<- field ref CCCC 2506 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2507 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2508 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2509 cmp r0, #0 @ is resolved entry null? 2510 bne .LOP_IGET_BYTE_finish @ no, already resolved 25118: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2512 EXPORT_PC() @ resolve() could throw 2513 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2514 bl dvmResolveInstField @ r0<- resolved InstField ptr 2515 cmp r0, #0 2516 bne .LOP_IGET_BYTE_finish 2517 b common_exceptionThrown 2518 2519 2520/* ------------------------------ */ 2521 .balign 64 2522.L_OP_IGET_CHAR: /* 0x57 */ 2523/* File: armv5te/OP_IGET_CHAR.S */ 2524@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" } 2525/* File: armv5te/OP_IGET.S */ 2526 /* 2527 * General 32-bit instance field get. 2528 * 2529 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2530 */ 2531 /* op vA, vB, field@CCCC */ 2532 mov r0, rINST, lsr #12 @ r0<- B 2533 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2534 FETCH(r1, 1) @ r1<- field ref CCCC 2535 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2536 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2537 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2538 cmp r0, #0 @ is resolved entry null? 2539 bne .LOP_IGET_CHAR_finish @ no, already resolved 25408: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2541 EXPORT_PC() @ resolve() could throw 2542 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2543 bl dvmResolveInstField @ r0<- resolved InstField ptr 2544 cmp r0, #0 2545 bne .LOP_IGET_CHAR_finish 2546 b common_exceptionThrown 2547 2548 2549/* ------------------------------ */ 2550 .balign 64 2551.L_OP_IGET_SHORT: /* 0x58 */ 2552/* File: armv5te/OP_IGET_SHORT.S */ 2553@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" } 2554/* File: armv5te/OP_IGET.S */ 2555 /* 2556 * General 32-bit instance field get. 2557 * 2558 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2559 */ 2560 /* op vA, vB, field@CCCC */ 2561 mov r0, rINST, lsr #12 @ r0<- B 2562 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2563 FETCH(r1, 1) @ r1<- field ref CCCC 2564 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2565 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2566 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2567 cmp r0, #0 @ is resolved entry null? 2568 bne .LOP_IGET_SHORT_finish @ no, already resolved 25698: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2570 EXPORT_PC() @ resolve() could throw 2571 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2572 bl dvmResolveInstField @ r0<- resolved InstField ptr 2573 cmp r0, #0 2574 bne .LOP_IGET_SHORT_finish 2575 b common_exceptionThrown 2576 2577 2578/* ------------------------------ */ 2579 .balign 64 2580.L_OP_IPUT: /* 0x59 */ 2581/* File: armv6t2/OP_IPUT.S */ 2582 /* 2583 * General 32-bit instance field put. 2584 * 2585 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2586 */ 2587 /* op vA, vB, field@CCCC */ 2588 mov r0, rINST, lsr #12 @ r0<- B 2589 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2590 FETCH(r1, 1) @ r1<- field ref CCCC 2591 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2592 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2593 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2594 cmp r0, #0 @ is resolved entry null? 2595 bne .LOP_IPUT_finish @ no, already resolved 25968: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2597 EXPORT_PC() @ resolve() could throw 2598 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2599 bl dvmResolveInstField @ r0<- resolved InstField ptr 2600 cmp r0, #0 @ success? 2601 bne .LOP_IPUT_finish @ yes, finish up 2602 b common_exceptionThrown 2603 2604/* ------------------------------ */ 2605 .balign 64 2606.L_OP_IPUT_WIDE: /* 0x5a */ 2607/* File: armv6t2/OP_IPUT_WIDE.S */ 2608 /* iput-wide vA, vB, field@CCCC */ 2609 mov r0, rINST, lsr #12 @ r0<- B 2610 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2611 FETCH(r1, 1) @ r1<- field ref CCCC 2612 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2613 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2614 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2615 cmp r0, #0 @ is resolved entry null? 2616 bne .LOP_IPUT_WIDE_finish @ no, already resolved 26178: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2618 EXPORT_PC() @ resolve() could throw 2619 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2620 bl dvmResolveInstField @ r0<- resolved InstField ptr 2621 cmp r0, #0 @ success? 2622 bne .LOP_IPUT_WIDE_finish @ yes, finish up 2623 b common_exceptionThrown 2624 2625/* ------------------------------ */ 2626 .balign 64 2627.L_OP_IPUT_OBJECT: /* 0x5b */ 2628/* File: armv5te/OP_IPUT_OBJECT.S */ 2629 /* 2630 * 32-bit instance field put. 2631 * 2632 * for: iput-object, iput-object-volatile 2633 */ 2634 /* op vA, vB, field@CCCC */ 2635 mov r0, rINST, lsr #12 @ r0<- B 2636 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2637 FETCH(r1, 1) @ r1<- field ref CCCC 2638 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2639 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2640 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2641 cmp r0, #0 @ is resolved entry null? 2642 bne .LOP_IPUT_OBJECT_finish @ no, already resolved 26438: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2644 EXPORT_PC() @ resolve() could throw 2645 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2646 bl dvmResolveInstField @ r0<- resolved InstField ptr 2647 cmp r0, #0 @ success? 2648 bne .LOP_IPUT_OBJECT_finish @ yes, finish up 2649 b common_exceptionThrown 2650 2651/* ------------------------------ */ 2652 .balign 64 2653.L_OP_IPUT_BOOLEAN: /* 0x5c */ 2654/* File: armv5te/OP_IPUT_BOOLEAN.S */ 2655@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" } 2656/* File: armv5te/OP_IPUT.S */ 2657 /* 2658 * General 32-bit instance field put. 2659 * 2660 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2661 */ 2662 /* op vA, vB, field@CCCC */ 2663 mov r0, rINST, lsr #12 @ r0<- B 2664 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2665 FETCH(r1, 1) @ r1<- field ref CCCC 2666 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2667 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2668 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2669 cmp r0, #0 @ is resolved entry null? 2670 bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved 26718: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2672 EXPORT_PC() @ resolve() could throw 2673 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2674 bl dvmResolveInstField @ r0<- resolved InstField ptr 2675 cmp r0, #0 @ success? 2676 bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up 2677 b common_exceptionThrown 2678 2679 2680/* ------------------------------ */ 2681 .balign 64 2682.L_OP_IPUT_BYTE: /* 0x5d */ 2683/* File: armv5te/OP_IPUT_BYTE.S */ 2684@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" } 2685/* File: armv5te/OP_IPUT.S */ 2686 /* 2687 * General 32-bit instance field put. 2688 * 2689 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2690 */ 2691 /* op vA, vB, field@CCCC */ 2692 mov r0, rINST, lsr #12 @ r0<- B 2693 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2694 FETCH(r1, 1) @ r1<- field ref CCCC 2695 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2696 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2697 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2698 cmp r0, #0 @ is resolved entry null? 2699 bne .LOP_IPUT_BYTE_finish @ no, already resolved 27008: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2701 EXPORT_PC() @ resolve() could throw 2702 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2703 bl dvmResolveInstField @ r0<- resolved InstField ptr 2704 cmp r0, #0 @ success? 2705 bne .LOP_IPUT_BYTE_finish @ yes, finish up 2706 b common_exceptionThrown 2707 2708 2709/* ------------------------------ */ 2710 .balign 64 2711.L_OP_IPUT_CHAR: /* 0x5e */ 2712/* File: armv5te/OP_IPUT_CHAR.S */ 2713@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" } 2714/* File: armv5te/OP_IPUT.S */ 2715 /* 2716 * General 32-bit instance field put. 2717 * 2718 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2719 */ 2720 /* op vA, vB, field@CCCC */ 2721 mov r0, rINST, lsr #12 @ r0<- B 2722 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2723 FETCH(r1, 1) @ r1<- field ref CCCC 2724 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2725 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2726 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2727 cmp r0, #0 @ is resolved entry null? 2728 bne .LOP_IPUT_CHAR_finish @ no, already resolved 27298: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2730 EXPORT_PC() @ resolve() could throw 2731 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2732 bl dvmResolveInstField @ r0<- resolved InstField ptr 2733 cmp r0, #0 @ success? 2734 bne .LOP_IPUT_CHAR_finish @ yes, finish up 2735 b common_exceptionThrown 2736 2737 2738/* ------------------------------ */ 2739 .balign 64 2740.L_OP_IPUT_SHORT: /* 0x5f */ 2741/* File: armv5te/OP_IPUT_SHORT.S */ 2742@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" } 2743/* File: armv5te/OP_IPUT.S */ 2744 /* 2745 * General 32-bit instance field put. 2746 * 2747 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2748 */ 2749 /* op vA, vB, field@CCCC */ 2750 mov r0, rINST, lsr #12 @ r0<- B 2751 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2752 FETCH(r1, 1) @ r1<- field ref CCCC 2753 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2754 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2755 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2756 cmp r0, #0 @ is resolved entry null? 2757 bne .LOP_IPUT_SHORT_finish @ no, already resolved 27588: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2759 EXPORT_PC() @ resolve() could throw 2760 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2761 bl dvmResolveInstField @ r0<- resolved InstField ptr 2762 cmp r0, #0 @ success? 2763 bne .LOP_IPUT_SHORT_finish @ yes, finish up 2764 b common_exceptionThrown 2765 2766 2767/* ------------------------------ */ 2768 .balign 64 2769.L_OP_SGET: /* 0x60 */ 2770/* File: armv5te/OP_SGET.S */ 2771 /* 2772 * General 32-bit SGET handler. 2773 * 2774 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2775 */ 2776 /* op vAA, field@BBBB */ 2777 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2778 FETCH(r1, 1) @ r1<- field ref BBBB 2779 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2780 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2781 cmp r0, #0 @ is resolved entry null? 2782 beq .LOP_SGET_resolve @ yes, do resolve 2783.LOP_SGET_finish: @ field ptr in r0 2784 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2785 @ no-op @ acquiring load 2786 mov r2, rINST, lsr #8 @ r2<- AA 2787 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2788 SET_VREG(r1, r2) @ fp[AA]<- r1 2789 GET_INST_OPCODE(ip) @ extract opcode from rINST 2790 GOTO_OPCODE(ip) @ jump to next instruction 2791 2792/* ------------------------------ */ 2793 .balign 64 2794.L_OP_SGET_WIDE: /* 0x61 */ 2795/* File: armv5te/OP_SGET_WIDE.S */ 2796 /* 2797 * 64-bit SGET handler. 2798 */ 2799 /* sget-wide vAA, field@BBBB */ 2800 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2801 FETCH(r1, 1) @ r1<- field ref BBBB 2802 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2803 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2804 cmp r0, #0 @ is resolved entry null? 2805 beq .LOP_SGET_WIDE_resolve @ yes, do resolve 2806.LOP_SGET_WIDE_finish: 2807 mov r9, rINST, lsr #8 @ r9<- AA 2808 .if 0 2809 add r0, r0, #offStaticField_value @ r0<- pointer to data 2810 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 2811 .else 2812 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 2813 .endif 2814 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2815 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2816 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 2817 GET_INST_OPCODE(ip) @ extract opcode from rINST 2818 GOTO_OPCODE(ip) @ jump to next instruction 2819 2820/* ------------------------------ */ 2821 .balign 64 2822.L_OP_SGET_OBJECT: /* 0x62 */ 2823/* File: armv5te/OP_SGET_OBJECT.S */ 2824/* File: armv5te/OP_SGET.S */ 2825 /* 2826 * General 32-bit SGET handler. 2827 * 2828 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2829 */ 2830 /* op vAA, field@BBBB */ 2831 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2832 FETCH(r1, 1) @ r1<- field ref BBBB 2833 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2834 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2835 cmp r0, #0 @ is resolved entry null? 2836 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve 2837.LOP_SGET_OBJECT_finish: @ field ptr in r0 2838 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2839 @ no-op @ acquiring load 2840 mov r2, rINST, lsr #8 @ r2<- AA 2841 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2842 SET_VREG(r1, r2) @ fp[AA]<- r1 2843 GET_INST_OPCODE(ip) @ extract opcode from rINST 2844 GOTO_OPCODE(ip) @ jump to next instruction 2845 2846 2847/* ------------------------------ */ 2848 .balign 64 2849.L_OP_SGET_BOOLEAN: /* 0x63 */ 2850/* File: armv5te/OP_SGET_BOOLEAN.S */ 2851/* File: armv5te/OP_SGET.S */ 2852 /* 2853 * General 32-bit SGET handler. 2854 * 2855 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2856 */ 2857 /* op vAA, field@BBBB */ 2858 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2859 FETCH(r1, 1) @ r1<- field ref BBBB 2860 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2861 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2862 cmp r0, #0 @ is resolved entry null? 2863 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve 2864.LOP_SGET_BOOLEAN_finish: @ field ptr in r0 2865 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2866 @ no-op @ acquiring load 2867 mov r2, rINST, lsr #8 @ r2<- AA 2868 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2869 SET_VREG(r1, r2) @ fp[AA]<- r1 2870 GET_INST_OPCODE(ip) @ extract opcode from rINST 2871 GOTO_OPCODE(ip) @ jump to next instruction 2872 2873 2874/* ------------------------------ */ 2875 .balign 64 2876.L_OP_SGET_BYTE: /* 0x64 */ 2877/* File: armv5te/OP_SGET_BYTE.S */ 2878/* File: armv5te/OP_SGET.S */ 2879 /* 2880 * General 32-bit SGET handler. 2881 * 2882 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2883 */ 2884 /* op vAA, field@BBBB */ 2885 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2886 FETCH(r1, 1) @ r1<- field ref BBBB 2887 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2888 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2889 cmp r0, #0 @ is resolved entry null? 2890 beq .LOP_SGET_BYTE_resolve @ yes, do resolve 2891.LOP_SGET_BYTE_finish: @ field ptr in r0 2892 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2893 @ no-op @ acquiring load 2894 mov r2, rINST, lsr #8 @ r2<- AA 2895 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2896 SET_VREG(r1, r2) @ fp[AA]<- r1 2897 GET_INST_OPCODE(ip) @ extract opcode from rINST 2898 GOTO_OPCODE(ip) @ jump to next instruction 2899 2900 2901/* ------------------------------ */ 2902 .balign 64 2903.L_OP_SGET_CHAR: /* 0x65 */ 2904/* File: armv5te/OP_SGET_CHAR.S */ 2905/* File: armv5te/OP_SGET.S */ 2906 /* 2907 * General 32-bit SGET handler. 2908 * 2909 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2910 */ 2911 /* op vAA, field@BBBB */ 2912 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2913 FETCH(r1, 1) @ r1<- field ref BBBB 2914 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2915 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2916 cmp r0, #0 @ is resolved entry null? 2917 beq .LOP_SGET_CHAR_resolve @ yes, do resolve 2918.LOP_SGET_CHAR_finish: @ field ptr in r0 2919 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2920 @ no-op @ acquiring load 2921 mov r2, rINST, lsr #8 @ r2<- AA 2922 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2923 SET_VREG(r1, r2) @ fp[AA]<- r1 2924 GET_INST_OPCODE(ip) @ extract opcode from rINST 2925 GOTO_OPCODE(ip) @ jump to next instruction 2926 2927 2928/* ------------------------------ */ 2929 .balign 64 2930.L_OP_SGET_SHORT: /* 0x66 */ 2931/* File: armv5te/OP_SGET_SHORT.S */ 2932/* File: armv5te/OP_SGET.S */ 2933 /* 2934 * General 32-bit SGET handler. 2935 * 2936 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2937 */ 2938 /* op vAA, field@BBBB */ 2939 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2940 FETCH(r1, 1) @ r1<- field ref BBBB 2941 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2942 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2943 cmp r0, #0 @ is resolved entry null? 2944 beq .LOP_SGET_SHORT_resolve @ yes, do resolve 2945.LOP_SGET_SHORT_finish: @ field ptr in r0 2946 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2947 @ no-op @ acquiring load 2948 mov r2, rINST, lsr #8 @ r2<- AA 2949 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2950 SET_VREG(r1, r2) @ fp[AA]<- r1 2951 GET_INST_OPCODE(ip) @ extract opcode from rINST 2952 GOTO_OPCODE(ip) @ jump to next instruction 2953 2954 2955/* ------------------------------ */ 2956 .balign 64 2957.L_OP_SPUT: /* 0x67 */ 2958/* File: armv5te/OP_SPUT.S */ 2959 /* 2960 * General 32-bit SPUT handler. 2961 * 2962 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 2963 */ 2964 /* op vAA, field@BBBB */ 2965 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2966 FETCH(r1, 1) @ r1<- field ref BBBB 2967 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2968 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2969 cmp r0, #0 @ is resolved entry null? 2970 beq .LOP_SPUT_resolve @ yes, do resolve 2971.LOP_SPUT_finish: @ field ptr in r0 2972 mov r2, rINST, lsr #8 @ r2<- AA 2973 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2974 GET_VREG(r1, r2) @ r1<- fp[AA] 2975 GET_INST_OPCODE(ip) @ extract opcode from rINST 2976 @ no-op @ releasing store 2977 str r1, [r0, #offStaticField_value] @ field<- vAA 2978 GOTO_OPCODE(ip) @ jump to next instruction 2979 2980/* ------------------------------ */ 2981 .balign 64 2982.L_OP_SPUT_WIDE: /* 0x68 */ 2983/* File: armv5te/OP_SPUT_WIDE.S */ 2984 /* 2985 * 64-bit SPUT handler. 2986 */ 2987 /* sput-wide vAA, field@BBBB */ 2988 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 2989 FETCH(r1, 1) @ r1<- field ref BBBB 2990 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 2991 mov r9, rINST, lsr #8 @ r9<- AA 2992 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 2993 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2994 cmp r2, #0 @ is resolved entry null? 2995 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve 2996.LOP_SPUT_WIDE_finish: @ field ptr in r2, AA in r9 2997 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2998 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 2999 GET_INST_OPCODE(r10) @ extract opcode from rINST 3000 .if 0 3001 add r2, r2, #offStaticField_value @ r2<- pointer to data 3002 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 3003 .else 3004 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 3005 .endif 3006 GOTO_OPCODE(r10) @ jump to next instruction 3007 3008/* ------------------------------ */ 3009 .balign 64 3010.L_OP_SPUT_OBJECT: /* 0x69 */ 3011/* File: armv5te/OP_SPUT_OBJECT.S */ 3012 /* 3013 * 32-bit SPUT handler for objects 3014 * 3015 * for: sput-object, sput-object-volatile 3016 */ 3017 /* op vAA, field@BBBB */ 3018 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3019 FETCH(r1, 1) @ r1<- field ref BBBB 3020 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3021 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3022 cmp r0, #0 @ is resolved entry null? 3023 bne .LOP_SPUT_OBJECT_finish @ no, continue 3024 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3025 EXPORT_PC() @ resolve() could throw, so export now 3026 ldr r0, [r9, #offMethod_clazz] @ r0<- method->clazz 3027 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 3028 cmp r0, #0 @ success? 3029 bne .LOP_SPUT_OBJECT_finish @ yes, finish 3030 b common_exceptionThrown @ no, handle exception 3031 3032 3033/* ------------------------------ */ 3034 .balign 64 3035.L_OP_SPUT_BOOLEAN: /* 0x6a */ 3036/* File: armv5te/OP_SPUT_BOOLEAN.S */ 3037/* File: armv5te/OP_SPUT.S */ 3038 /* 3039 * General 32-bit SPUT handler. 3040 * 3041 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3042 */ 3043 /* op vAA, field@BBBB */ 3044 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3045 FETCH(r1, 1) @ r1<- field ref BBBB 3046 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3047 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3048 cmp r0, #0 @ is resolved entry null? 3049 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve 3050.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0 3051 mov r2, rINST, lsr #8 @ r2<- AA 3052 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3053 GET_VREG(r1, r2) @ r1<- fp[AA] 3054 GET_INST_OPCODE(ip) @ extract opcode from rINST 3055 @ no-op @ releasing store 3056 str r1, [r0, #offStaticField_value] @ field<- vAA 3057 GOTO_OPCODE(ip) @ jump to next instruction 3058 3059 3060/* ------------------------------ */ 3061 .balign 64 3062.L_OP_SPUT_BYTE: /* 0x6b */ 3063/* File: armv5te/OP_SPUT_BYTE.S */ 3064/* File: armv5te/OP_SPUT.S */ 3065 /* 3066 * General 32-bit SPUT handler. 3067 * 3068 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3069 */ 3070 /* op vAA, field@BBBB */ 3071 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3072 FETCH(r1, 1) @ r1<- field ref BBBB 3073 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3074 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3075 cmp r0, #0 @ is resolved entry null? 3076 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve 3077.LOP_SPUT_BYTE_finish: @ field ptr in r0 3078 mov r2, rINST, lsr #8 @ r2<- AA 3079 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3080 GET_VREG(r1, r2) @ r1<- fp[AA] 3081 GET_INST_OPCODE(ip) @ extract opcode from rINST 3082 @ no-op @ releasing store 3083 str r1, [r0, #offStaticField_value] @ field<- vAA 3084 GOTO_OPCODE(ip) @ jump to next instruction 3085 3086 3087/* ------------------------------ */ 3088 .balign 64 3089.L_OP_SPUT_CHAR: /* 0x6c */ 3090/* File: armv5te/OP_SPUT_CHAR.S */ 3091/* File: armv5te/OP_SPUT.S */ 3092 /* 3093 * General 32-bit SPUT handler. 3094 * 3095 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3096 */ 3097 /* op vAA, field@BBBB */ 3098 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3099 FETCH(r1, 1) @ r1<- field ref BBBB 3100 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3101 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3102 cmp r0, #0 @ is resolved entry null? 3103 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve 3104.LOP_SPUT_CHAR_finish: @ field ptr in r0 3105 mov r2, rINST, lsr #8 @ r2<- AA 3106 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3107 GET_VREG(r1, r2) @ r1<- fp[AA] 3108 GET_INST_OPCODE(ip) @ extract opcode from rINST 3109 @ no-op @ releasing store 3110 str r1, [r0, #offStaticField_value] @ field<- vAA 3111 GOTO_OPCODE(ip) @ jump to next instruction 3112 3113 3114/* ------------------------------ */ 3115 .balign 64 3116.L_OP_SPUT_SHORT: /* 0x6d */ 3117/* File: armv5te/OP_SPUT_SHORT.S */ 3118/* File: armv5te/OP_SPUT.S */ 3119 /* 3120 * General 32-bit SPUT handler. 3121 * 3122 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3123 */ 3124 /* op vAA, field@BBBB */ 3125 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3126 FETCH(r1, 1) @ r1<- field ref BBBB 3127 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3128 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3129 cmp r0, #0 @ is resolved entry null? 3130 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve 3131.LOP_SPUT_SHORT_finish: @ field ptr in r0 3132 mov r2, rINST, lsr #8 @ r2<- AA 3133 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3134 GET_VREG(r1, r2) @ r1<- fp[AA] 3135 GET_INST_OPCODE(ip) @ extract opcode from rINST 3136 @ no-op @ releasing store 3137 str r1, [r0, #offStaticField_value] @ field<- vAA 3138 GOTO_OPCODE(ip) @ jump to next instruction 3139 3140 3141/* ------------------------------ */ 3142 .balign 64 3143.L_OP_INVOKE_VIRTUAL: /* 0x6e */ 3144/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3145 /* 3146 * Handle a virtual method call. 3147 * 3148 * for: invoke-virtual, invoke-virtual/range 3149 */ 3150 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3151 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3152 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3153 FETCH(r1, 1) @ r1<- BBBB 3154 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3155 FETCH(r10, 2) @ r10<- GFED or CCCC 3156 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3157 .if (!0) 3158 and r10, r10, #15 @ r10<- D (or stays CCCC) 3159 .endif 3160 cmp r0, #0 @ already resolved? 3161 EXPORT_PC() @ must export for invoke 3162 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on 3163 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3164 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3165 mov r2, #METHOD_VIRTUAL @ resolver method type 3166 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3167 cmp r0, #0 @ got null? 3168 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue 3169 b common_exceptionThrown @ yes, handle exception 3170 3171/* ------------------------------ */ 3172 .balign 64 3173.L_OP_INVOKE_SUPER: /* 0x6f */ 3174/* File: armv5te/OP_INVOKE_SUPER.S */ 3175 /* 3176 * Handle a "super" method call. 3177 * 3178 * for: invoke-super, invoke-super/range 3179 */ 3180 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3181 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3182 FETCH(r10, 2) @ r10<- GFED or CCCC 3183 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3184 .if (!0) 3185 and r10, r10, #15 @ r10<- D (or stays CCCC) 3186 .endif 3187 FETCH(r1, 1) @ r1<- BBBB 3188 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3189 GET_VREG(r2, r10) @ r2<- "this" ptr 3190 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3191 cmp r2, #0 @ null "this"? 3192 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3193 beq common_errNullObject @ null "this", throw exception 3194 cmp r0, #0 @ already resolved? 3195 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3196 EXPORT_PC() @ must export for invoke 3197 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on 3198 b .LOP_INVOKE_SUPER_resolve @ do resolve now 3199 3200/* ------------------------------ */ 3201 .balign 64 3202.L_OP_INVOKE_DIRECT: /* 0x70 */ 3203/* File: armv5te/OP_INVOKE_DIRECT.S */ 3204 /* 3205 * Handle a direct method call. 3206 * 3207 * (We could defer the "is 'this' pointer null" test to the common 3208 * method invocation code, and use a flag to indicate that static 3209 * calls don't count. If we do this as part of copying the arguments 3210 * out we could avoiding loading the first arg twice.) 3211 * 3212 * for: invoke-direct, invoke-direct/range 3213 */ 3214 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3215 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3216 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3217 FETCH(r1, 1) @ r1<- BBBB 3218 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3219 FETCH(r10, 2) @ r10<- GFED or CCCC 3220 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3221 .if (!0) 3222 and r10, r10, #15 @ r10<- D (or stays CCCC) 3223 .endif 3224 cmp r0, #0 @ already resolved? 3225 EXPORT_PC() @ must export for invoke 3226 GET_VREG(r2, r10) @ r2<- "this" ptr 3227 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now 3228.LOP_INVOKE_DIRECT_finish: 3229 cmp r2, #0 @ null "this" ref? 3230 bne common_invokeMethodNoRange @ no, continue on 3231 b common_errNullObject @ yes, throw exception 3232 3233/* ------------------------------ */ 3234 .balign 64 3235.L_OP_INVOKE_STATIC: /* 0x71 */ 3236/* File: armv5te/OP_INVOKE_STATIC.S */ 3237 /* 3238 * Handle a static method call. 3239 * 3240 * for: invoke-static, invoke-static/range 3241 */ 3242 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3243 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3244 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3245 FETCH(r1, 1) @ r1<- BBBB 3246 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3247 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3248 cmp r0, #0 @ already resolved? 3249 EXPORT_PC() @ must export for invoke 3250 bne common_invokeMethodNoRange @ yes, continue on 32510: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3252 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3253 mov r2, #METHOD_STATIC @ resolver method type 3254 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3255 cmp r0, #0 @ got null? 3256 bne common_invokeMethodNoRange @ no, continue 3257 b common_exceptionThrown @ yes, handle exception 3258 3259/* ------------------------------ */ 3260 .balign 64 3261.L_OP_INVOKE_INTERFACE: /* 0x72 */ 3262/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3263 /* 3264 * Handle an interface method call. 3265 * 3266 * for: invoke-interface, invoke-interface/range 3267 */ 3268 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3269 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3270 FETCH(r2, 2) @ r2<- FEDC or CCCC 3271 FETCH(r1, 1) @ r1<- BBBB 3272 .if (!0) 3273 and r2, r2, #15 @ r2<- C (or stays CCCC) 3274 .endif 3275 EXPORT_PC() @ must export for invoke 3276 GET_VREG(r0, r2) @ r0<- first arg ("this") 3277 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3278 cmp r0, #0 @ null obj? 3279 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3280 beq common_errNullObject @ yes, fail 3281 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3282 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3283 cmp r0, #0 @ failed? 3284 beq common_exceptionThrown @ yes, handle exception 3285 b common_invokeMethodNoRange @ jump to common handler 3286 3287/* ------------------------------ */ 3288 .balign 64 3289.L_OP_UNUSED_73: /* 0x73 */ 3290/* File: armv5te/OP_UNUSED_73.S */ 3291/* File: armv5te/unused.S */ 3292 bl common_abort 3293 3294 3295/* ------------------------------ */ 3296 .balign 64 3297.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */ 3298/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */ 3299/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3300 /* 3301 * Handle a virtual method call. 3302 * 3303 * for: invoke-virtual, invoke-virtual/range 3304 */ 3305 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3306 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3307 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3308 FETCH(r1, 1) @ r1<- BBBB 3309 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3310 FETCH(r10, 2) @ r10<- GFED or CCCC 3311 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3312 .if (!1) 3313 and r10, r10, #15 @ r10<- D (or stays CCCC) 3314 .endif 3315 cmp r0, #0 @ already resolved? 3316 EXPORT_PC() @ must export for invoke 3317 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on 3318 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3319 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3320 mov r2, #METHOD_VIRTUAL @ resolver method type 3321 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3322 cmp r0, #0 @ got null? 3323 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue 3324 b common_exceptionThrown @ yes, handle exception 3325 3326 3327/* ------------------------------ */ 3328 .balign 64 3329.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */ 3330/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */ 3331/* File: armv5te/OP_INVOKE_SUPER.S */ 3332 /* 3333 * Handle a "super" method call. 3334 * 3335 * for: invoke-super, invoke-super/range 3336 */ 3337 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3338 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3339 FETCH(r10, 2) @ r10<- GFED or CCCC 3340 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3341 .if (!1) 3342 and r10, r10, #15 @ r10<- D (or stays CCCC) 3343 .endif 3344 FETCH(r1, 1) @ r1<- BBBB 3345 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3346 GET_VREG(r2, r10) @ r2<- "this" ptr 3347 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3348 cmp r2, #0 @ null "this"? 3349 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3350 beq common_errNullObject @ null "this", throw exception 3351 cmp r0, #0 @ already resolved? 3352 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3353 EXPORT_PC() @ must export for invoke 3354 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on 3355 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now 3356 3357 3358/* ------------------------------ */ 3359 .balign 64 3360.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */ 3361/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */ 3362/* File: armv5te/OP_INVOKE_DIRECT.S */ 3363 /* 3364 * Handle a direct method call. 3365 * 3366 * (We could defer the "is 'this' pointer null" test to the common 3367 * method invocation code, and use a flag to indicate that static 3368 * calls don't count. If we do this as part of copying the arguments 3369 * out we could avoiding loading the first arg twice.) 3370 * 3371 * for: invoke-direct, invoke-direct/range 3372 */ 3373 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3374 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3375 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3376 FETCH(r1, 1) @ r1<- BBBB 3377 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3378 FETCH(r10, 2) @ r10<- GFED or CCCC 3379 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3380 .if (!1) 3381 and r10, r10, #15 @ r10<- D (or stays CCCC) 3382 .endif 3383 cmp r0, #0 @ already resolved? 3384 EXPORT_PC() @ must export for invoke 3385 GET_VREG(r2, r10) @ r2<- "this" ptr 3386 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now 3387.LOP_INVOKE_DIRECT_RANGE_finish: 3388 cmp r2, #0 @ null "this" ref? 3389 bne common_invokeMethodRange @ no, continue on 3390 b common_errNullObject @ yes, throw exception 3391 3392 3393/* ------------------------------ */ 3394 .balign 64 3395.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */ 3396/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */ 3397/* File: armv5te/OP_INVOKE_STATIC.S */ 3398 /* 3399 * Handle a static method call. 3400 * 3401 * for: invoke-static, invoke-static/range 3402 */ 3403 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3404 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3405 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3406 FETCH(r1, 1) @ r1<- BBBB 3407 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3408 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3409 cmp r0, #0 @ already resolved? 3410 EXPORT_PC() @ must export for invoke 3411 bne common_invokeMethodRange @ yes, continue on 34120: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3413 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3414 mov r2, #METHOD_STATIC @ resolver method type 3415 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3416 cmp r0, #0 @ got null? 3417 bne common_invokeMethodRange @ no, continue 3418 b common_exceptionThrown @ yes, handle exception 3419 3420 3421/* ------------------------------ */ 3422 .balign 64 3423.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */ 3424/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */ 3425/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3426 /* 3427 * Handle an interface method call. 3428 * 3429 * for: invoke-interface, invoke-interface/range 3430 */ 3431 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3432 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3433 FETCH(r2, 2) @ r2<- FEDC or CCCC 3434 FETCH(r1, 1) @ r1<- BBBB 3435 .if (!1) 3436 and r2, r2, #15 @ r2<- C (or stays CCCC) 3437 .endif 3438 EXPORT_PC() @ must export for invoke 3439 GET_VREG(r0, r2) @ r0<- first arg ("this") 3440 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3441 cmp r0, #0 @ null obj? 3442 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3443 beq common_errNullObject @ yes, fail 3444 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3445 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3446 cmp r0, #0 @ failed? 3447 beq common_exceptionThrown @ yes, handle exception 3448 b common_invokeMethodRange @ jump to common handler 3449 3450 3451/* ------------------------------ */ 3452 .balign 64 3453.L_OP_UNUSED_79: /* 0x79 */ 3454/* File: armv5te/OP_UNUSED_79.S */ 3455/* File: armv5te/unused.S */ 3456 bl common_abort 3457 3458 3459/* ------------------------------ */ 3460 .balign 64 3461.L_OP_UNUSED_7A: /* 0x7a */ 3462/* File: armv5te/OP_UNUSED_7A.S */ 3463/* File: armv5te/unused.S */ 3464 bl common_abort 3465 3466 3467/* ------------------------------ */ 3468 .balign 64 3469.L_OP_NEG_INT: /* 0x7b */ 3470/* File: armv6t2/OP_NEG_INT.S */ 3471/* File: armv6t2/unop.S */ 3472 /* 3473 * Generic 32-bit unary operation. Provide an "instr" line that 3474 * specifies an instruction that performs "result = op r0". 3475 * This could be an ARM instruction or a function call. 3476 * 3477 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3478 * int-to-byte, int-to-char, int-to-short 3479 */ 3480 /* unop vA, vB */ 3481 mov r3, rINST, lsr #12 @ r3<- B 3482 ubfx r9, rINST, #8, #4 @ r9<- A 3483 GET_VREG(r0, r3) @ r0<- vB 3484 @ optional op; may set condition codes 3485 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3486 rsb r0, r0, #0 @ r0<- op, r0-r3 changed 3487 GET_INST_OPCODE(ip) @ extract opcode from rINST 3488 SET_VREG(r0, r9) @ vAA<- r0 3489 GOTO_OPCODE(ip) @ jump to next instruction 3490 /* 8-9 instructions */ 3491 3492 3493/* ------------------------------ */ 3494 .balign 64 3495.L_OP_NOT_INT: /* 0x7c */ 3496/* File: armv6t2/OP_NOT_INT.S */ 3497/* File: armv6t2/unop.S */ 3498 /* 3499 * Generic 32-bit unary operation. Provide an "instr" line that 3500 * specifies an instruction that performs "result = op r0". 3501 * This could be an ARM instruction or a function call. 3502 * 3503 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3504 * int-to-byte, int-to-char, int-to-short 3505 */ 3506 /* unop vA, vB */ 3507 mov r3, rINST, lsr #12 @ r3<- B 3508 ubfx r9, rINST, #8, #4 @ r9<- A 3509 GET_VREG(r0, r3) @ r0<- vB 3510 @ optional op; may set condition codes 3511 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3512 mvn r0, r0 @ r0<- op, r0-r3 changed 3513 GET_INST_OPCODE(ip) @ extract opcode from rINST 3514 SET_VREG(r0, r9) @ vAA<- r0 3515 GOTO_OPCODE(ip) @ jump to next instruction 3516 /* 8-9 instructions */ 3517 3518 3519/* ------------------------------ */ 3520 .balign 64 3521.L_OP_NEG_LONG: /* 0x7d */ 3522/* File: armv6t2/OP_NEG_LONG.S */ 3523/* File: armv6t2/unopWide.S */ 3524 /* 3525 * Generic 64-bit unary operation. Provide an "instr" line that 3526 * specifies an instruction that performs "result = op r0/r1". 3527 * This could be an ARM instruction or a function call. 3528 * 3529 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3530 */ 3531 /* unop vA, vB */ 3532 mov r3, rINST, lsr #12 @ r3<- B 3533 ubfx r9, rINST, #8, #4 @ r9<- A 3534 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3535 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3536 ldmia r3, {r0-r1} @ r0/r1<- vAA 3537 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3538 rsbs r0, r0, #0 @ optional op; may set condition codes 3539 rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed 3540 GET_INST_OPCODE(ip) @ extract opcode from rINST 3541 stmia r9, {r0-r1} @ vAA<- r0/r1 3542 GOTO_OPCODE(ip) @ jump to next instruction 3543 /* 10-11 instructions */ 3544 3545 3546/* ------------------------------ */ 3547 .balign 64 3548.L_OP_NOT_LONG: /* 0x7e */ 3549/* File: armv6t2/OP_NOT_LONG.S */ 3550/* File: armv6t2/unopWide.S */ 3551 /* 3552 * Generic 64-bit unary operation. Provide an "instr" line that 3553 * specifies an instruction that performs "result = op r0/r1". 3554 * This could be an ARM instruction or a function call. 3555 * 3556 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3557 */ 3558 /* unop vA, vB */ 3559 mov r3, rINST, lsr #12 @ r3<- B 3560 ubfx r9, rINST, #8, #4 @ r9<- A 3561 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3562 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3563 ldmia r3, {r0-r1} @ r0/r1<- vAA 3564 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3565 mvn r0, r0 @ optional op; may set condition codes 3566 mvn r1, r1 @ r0/r1<- op, r2-r3 changed 3567 GET_INST_OPCODE(ip) @ extract opcode from rINST 3568 stmia r9, {r0-r1} @ vAA<- r0/r1 3569 GOTO_OPCODE(ip) @ jump to next instruction 3570 /* 10-11 instructions */ 3571 3572 3573/* ------------------------------ */ 3574 .balign 64 3575.L_OP_NEG_FLOAT: /* 0x7f */ 3576/* File: armv6t2/OP_NEG_FLOAT.S */ 3577/* File: armv6t2/unop.S */ 3578 /* 3579 * Generic 32-bit unary operation. Provide an "instr" line that 3580 * specifies an instruction that performs "result = op r0". 3581 * This could be an ARM instruction or a function call. 3582 * 3583 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3584 * int-to-byte, int-to-char, int-to-short 3585 */ 3586 /* unop vA, vB */ 3587 mov r3, rINST, lsr #12 @ r3<- B 3588 ubfx r9, rINST, #8, #4 @ r9<- A 3589 GET_VREG(r0, r3) @ r0<- vB 3590 @ optional op; may set condition codes 3591 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3592 add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed 3593 GET_INST_OPCODE(ip) @ extract opcode from rINST 3594 SET_VREG(r0, r9) @ vAA<- r0 3595 GOTO_OPCODE(ip) @ jump to next instruction 3596 /* 8-9 instructions */ 3597 3598 3599/* ------------------------------ */ 3600 .balign 64 3601.L_OP_NEG_DOUBLE: /* 0x80 */ 3602/* File: armv6t2/OP_NEG_DOUBLE.S */ 3603/* File: armv6t2/unopWide.S */ 3604 /* 3605 * Generic 64-bit unary operation. Provide an "instr" line that 3606 * specifies an instruction that performs "result = op r0/r1". 3607 * This could be an ARM instruction or a function call. 3608 * 3609 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3610 */ 3611 /* unop vA, vB */ 3612 mov r3, rINST, lsr #12 @ r3<- B 3613 ubfx r9, rINST, #8, #4 @ r9<- A 3614 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3615 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3616 ldmia r3, {r0-r1} @ r0/r1<- vAA 3617 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3618 @ optional op; may set condition codes 3619 add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed 3620 GET_INST_OPCODE(ip) @ extract opcode from rINST 3621 stmia r9, {r0-r1} @ vAA<- r0/r1 3622 GOTO_OPCODE(ip) @ jump to next instruction 3623 /* 10-11 instructions */ 3624 3625 3626/* ------------------------------ */ 3627 .balign 64 3628.L_OP_INT_TO_LONG: /* 0x81 */ 3629/* File: armv6t2/OP_INT_TO_LONG.S */ 3630/* File: armv6t2/unopWider.S */ 3631 /* 3632 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3633 * that specifies an instruction that performs "result = op r0", where 3634 * "result" is a 64-bit quantity in r0/r1. 3635 * 3636 * For: int-to-long, int-to-double, float-to-long, float-to-double 3637 */ 3638 /* unop vA, vB */ 3639 mov r3, rINST, lsr #12 @ r3<- B 3640 ubfx r9, rINST, #8, #4 @ r9<- A 3641 GET_VREG(r0, r3) @ r0<- vB 3642 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3643 @ optional op; may set condition codes 3644 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3645 mov r1, r0, asr #31 @ r0<- op, r0-r3 changed 3646 GET_INST_OPCODE(ip) @ extract opcode from rINST 3647 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3648 GOTO_OPCODE(ip) @ jump to next instruction 3649 /* 9-10 instructions */ 3650 3651 3652/* ------------------------------ */ 3653 .balign 64 3654.L_OP_INT_TO_FLOAT: /* 0x82 */ 3655/* File: arm-vfp/OP_INT_TO_FLOAT.S */ 3656/* File: arm-vfp/funop.S */ 3657 /* 3658 * Generic 32-bit unary floating-point operation. Provide an "instr" 3659 * line that specifies an instruction that performs "s1 = op s0". 3660 * 3661 * for: int-to-float, float-to-int 3662 */ 3663 /* unop vA, vB */ 3664 mov r3, rINST, lsr #12 @ r3<- B 3665 mov r9, rINST, lsr #8 @ r9<- A+ 3666 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3667 flds s0, [r3] @ s0<- vB 3668 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3669 and r9, r9, #15 @ r9<- A 3670 fsitos s1, s0 @ s1<- op 3671 GET_INST_OPCODE(ip) @ extract opcode from rINST 3672 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3673 fsts s1, [r9] @ vA<- s1 3674 GOTO_OPCODE(ip) @ jump to next instruction 3675 3676 3677/* ------------------------------ */ 3678 .balign 64 3679.L_OP_INT_TO_DOUBLE: /* 0x83 */ 3680/* File: arm-vfp/OP_INT_TO_DOUBLE.S */ 3681/* File: arm-vfp/funopWider.S */ 3682 /* 3683 * Generic 32bit-to-64bit floating point unary operation. Provide an 3684 * "instr" line that specifies an instruction that performs "d0 = op s0". 3685 * 3686 * For: int-to-double, float-to-double 3687 */ 3688 /* unop vA, vB */ 3689 mov r3, rINST, lsr #12 @ r3<- B 3690 mov r9, rINST, lsr #8 @ r9<- A+ 3691 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3692 flds s0, [r3] @ s0<- vB 3693 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3694 and r9, r9, #15 @ r9<- A 3695 fsitod d0, s0 @ d0<- op 3696 GET_INST_OPCODE(ip) @ extract opcode from rINST 3697 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3698 fstd d0, [r9] @ vA<- d0 3699 GOTO_OPCODE(ip) @ jump to next instruction 3700 3701 3702/* ------------------------------ */ 3703 .balign 64 3704.L_OP_LONG_TO_INT: /* 0x84 */ 3705/* File: armv5te/OP_LONG_TO_INT.S */ 3706/* we ignore the high word, making this equivalent to a 32-bit reg move */ 3707/* File: armv5te/OP_MOVE.S */ 3708 /* for move, move-object, long-to-int */ 3709 /* op vA, vB */ 3710 mov r1, rINST, lsr #12 @ r1<- B from 15:12 3711 mov r0, rINST, lsr #8 @ r0<- A from 11:8 3712 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3713 GET_VREG(r2, r1) @ r2<- fp[B] 3714 and r0, r0, #15 3715 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 3716 SET_VREG(r2, r0) @ fp[A]<- r2 3717 GOTO_OPCODE(ip) @ execute next instruction 3718 3719 3720/* ------------------------------ */ 3721 .balign 64 3722.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3723/* File: armv6t2/OP_LONG_TO_FLOAT.S */ 3724/* File: armv6t2/unopNarrower.S */ 3725 /* 3726 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3727 * that specifies an instruction that performs "result = op r0/r1", where 3728 * "result" is a 32-bit quantity in r0. 3729 * 3730 * For: long-to-float, double-to-int, double-to-float 3731 * 3732 * (This would work for long-to-int, but that instruction is actually 3733 * an exact match for OP_MOVE.) 3734 */ 3735 /* unop vA, vB */ 3736 mov r3, rINST, lsr #12 @ r3<- B 3737 ubfx r9, rINST, #8, #4 @ r9<- A 3738 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3739 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3740 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3741 @ optional op; may set condition codes 3742 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3743 GET_INST_OPCODE(ip) @ extract opcode from rINST 3744 SET_VREG(r0, r9) @ vA<- r0 3745 GOTO_OPCODE(ip) @ jump to next instruction 3746 /* 9-10 instructions */ 3747 3748 3749/* ------------------------------ */ 3750 .balign 64 3751.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3752/* File: armv6t2/OP_LONG_TO_DOUBLE.S */ 3753/* File: armv6t2/unopWide.S */ 3754 /* 3755 * Generic 64-bit unary operation. Provide an "instr" line that 3756 * specifies an instruction that performs "result = op r0/r1". 3757 * This could be an ARM instruction or a function call. 3758 * 3759 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3760 */ 3761 /* unop vA, vB */ 3762 mov r3, rINST, lsr #12 @ r3<- B 3763 ubfx r9, rINST, #8, #4 @ r9<- A 3764 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3765 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3766 ldmia r3, {r0-r1} @ r0/r1<- vAA 3767 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3768 @ optional op; may set condition codes 3769 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3770 GET_INST_OPCODE(ip) @ extract opcode from rINST 3771 stmia r9, {r0-r1} @ vAA<- r0/r1 3772 GOTO_OPCODE(ip) @ jump to next instruction 3773 /* 10-11 instructions */ 3774 3775 3776/* ------------------------------ */ 3777 .balign 64 3778.L_OP_FLOAT_TO_INT: /* 0x87 */ 3779/* File: arm-vfp/OP_FLOAT_TO_INT.S */ 3780/* File: arm-vfp/funop.S */ 3781 /* 3782 * Generic 32-bit unary floating-point operation. Provide an "instr" 3783 * line that specifies an instruction that performs "s1 = op s0". 3784 * 3785 * for: int-to-float, float-to-int 3786 */ 3787 /* unop vA, vB */ 3788 mov r3, rINST, lsr #12 @ r3<- B 3789 mov r9, rINST, lsr #8 @ r9<- A+ 3790 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3791 flds s0, [r3] @ s0<- vB 3792 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3793 and r9, r9, #15 @ r9<- A 3794 ftosizs s1, s0 @ s1<- op 3795 GET_INST_OPCODE(ip) @ extract opcode from rINST 3796 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3797 fsts s1, [r9] @ vA<- s1 3798 GOTO_OPCODE(ip) @ jump to next instruction 3799 3800 3801/* ------------------------------ */ 3802 .balign 64 3803.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3804/* File: armv6t2/OP_FLOAT_TO_LONG.S */ 3805@include "armv6t2/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3806/* File: armv6t2/unopWider.S */ 3807 /* 3808 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3809 * that specifies an instruction that performs "result = op r0", where 3810 * "result" is a 64-bit quantity in r0/r1. 3811 * 3812 * For: int-to-long, int-to-double, float-to-long, float-to-double 3813 */ 3814 /* unop vA, vB */ 3815 mov r3, rINST, lsr #12 @ r3<- B 3816 ubfx r9, rINST, #8, #4 @ r9<- A 3817 GET_VREG(r0, r3) @ r0<- vB 3818 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3819 @ optional op; may set condition codes 3820 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3821 bl f2l_doconv @ r0<- op, r0-r3 changed 3822 GET_INST_OPCODE(ip) @ extract opcode from rINST 3823 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3824 GOTO_OPCODE(ip) @ jump to next instruction 3825 /* 9-10 instructions */ 3826 3827 3828 3829/* ------------------------------ */ 3830 .balign 64 3831.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3832/* File: arm-vfp/OP_FLOAT_TO_DOUBLE.S */ 3833/* File: arm-vfp/funopWider.S */ 3834 /* 3835 * Generic 32bit-to-64bit floating point unary operation. Provide an 3836 * "instr" line that specifies an instruction that performs "d0 = op s0". 3837 * 3838 * For: int-to-double, float-to-double 3839 */ 3840 /* unop vA, vB */ 3841 mov r3, rINST, lsr #12 @ r3<- B 3842 mov r9, rINST, lsr #8 @ r9<- A+ 3843 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3844 flds s0, [r3] @ s0<- vB 3845 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3846 and r9, r9, #15 @ r9<- A 3847 fcvtds d0, s0 @ d0<- op 3848 GET_INST_OPCODE(ip) @ extract opcode from rINST 3849 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3850 fstd d0, [r9] @ vA<- d0 3851 GOTO_OPCODE(ip) @ jump to next instruction 3852 3853 3854/* ------------------------------ */ 3855 .balign 64 3856.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3857/* File: arm-vfp/OP_DOUBLE_TO_INT.S */ 3858/* File: arm-vfp/funopNarrower.S */ 3859 /* 3860 * Generic 64bit-to-32bit unary floating point operation. Provide an 3861 * "instr" line that specifies an instruction that performs "s0 = op d0". 3862 * 3863 * For: double-to-int, double-to-float 3864 */ 3865 /* unop vA, vB */ 3866 mov r3, rINST, lsr #12 @ r3<- B 3867 mov r9, rINST, lsr #8 @ r9<- A+ 3868 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3869 fldd d0, [r3] @ d0<- vB 3870 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3871 and r9, r9, #15 @ r9<- A 3872 ftosizd s0, d0 @ s0<- op 3873 GET_INST_OPCODE(ip) @ extract opcode from rINST 3874 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3875 fsts s0, [r9] @ vA<- s0 3876 GOTO_OPCODE(ip) @ jump to next instruction 3877 3878 3879/* ------------------------------ */ 3880 .balign 64 3881.L_OP_DOUBLE_TO_LONG: /* 0x8b */ 3882/* File: armv6t2/OP_DOUBLE_TO_LONG.S */ 3883@include "armv6t2/unopWide.S" {"instr":"bl __aeabi_d2lz"} 3884/* File: armv6t2/unopWide.S */ 3885 /* 3886 * Generic 64-bit unary operation. Provide an "instr" line that 3887 * specifies an instruction that performs "result = op r0/r1". 3888 * This could be an ARM instruction or a function call. 3889 * 3890 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3891 */ 3892 /* unop vA, vB */ 3893 mov r3, rINST, lsr #12 @ r3<- B 3894 ubfx r9, rINST, #8, #4 @ r9<- A 3895 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3896 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3897 ldmia r3, {r0-r1} @ r0/r1<- vAA 3898 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3899 @ optional op; may set condition codes 3900 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 3901 GET_INST_OPCODE(ip) @ extract opcode from rINST 3902 stmia r9, {r0-r1} @ vAA<- r0/r1 3903 GOTO_OPCODE(ip) @ jump to next instruction 3904 /* 10-11 instructions */ 3905 3906 3907 3908/* ------------------------------ */ 3909 .balign 64 3910.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 3911/* File: arm-vfp/OP_DOUBLE_TO_FLOAT.S */ 3912/* File: arm-vfp/funopNarrower.S */ 3913 /* 3914 * Generic 64bit-to-32bit unary floating point operation. Provide an 3915 * "instr" line that specifies an instruction that performs "s0 = op d0". 3916 * 3917 * For: double-to-int, double-to-float 3918 */ 3919 /* unop vA, vB */ 3920 mov r3, rINST, lsr #12 @ r3<- B 3921 mov r9, rINST, lsr #8 @ r9<- A+ 3922 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3923 fldd d0, [r3] @ d0<- vB 3924 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3925 and r9, r9, #15 @ r9<- A 3926 fcvtsd s0, d0 @ s0<- op 3927 GET_INST_OPCODE(ip) @ extract opcode from rINST 3928 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3929 fsts s0, [r9] @ vA<- s0 3930 GOTO_OPCODE(ip) @ jump to next instruction 3931 3932 3933/* ------------------------------ */ 3934 .balign 64 3935.L_OP_INT_TO_BYTE: /* 0x8d */ 3936/* File: armv6t2/OP_INT_TO_BYTE.S */ 3937/* File: armv6t2/unop.S */ 3938 /* 3939 * Generic 32-bit unary operation. Provide an "instr" line that 3940 * specifies an instruction that performs "result = op r0". 3941 * This could be an ARM instruction or a function call. 3942 * 3943 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3944 * int-to-byte, int-to-char, int-to-short 3945 */ 3946 /* unop vA, vB */ 3947 mov r3, rINST, lsr #12 @ r3<- B 3948 ubfx r9, rINST, #8, #4 @ r9<- A 3949 GET_VREG(r0, r3) @ r0<- vB 3950 @ optional op; may set condition codes 3951 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3952 sxtb r0, r0 @ r0<- op, r0-r3 changed 3953 GET_INST_OPCODE(ip) @ extract opcode from rINST 3954 SET_VREG(r0, r9) @ vAA<- r0 3955 GOTO_OPCODE(ip) @ jump to next instruction 3956 /* 8-9 instructions */ 3957 3958 3959/* ------------------------------ */ 3960 .balign 64 3961.L_OP_INT_TO_CHAR: /* 0x8e */ 3962/* File: armv6t2/OP_INT_TO_CHAR.S */ 3963/* File: armv6t2/unop.S */ 3964 /* 3965 * Generic 32-bit unary operation. Provide an "instr" line that 3966 * specifies an instruction that performs "result = op r0". 3967 * This could be an ARM instruction or a function call. 3968 * 3969 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3970 * int-to-byte, int-to-char, int-to-short 3971 */ 3972 /* unop vA, vB */ 3973 mov r3, rINST, lsr #12 @ r3<- B 3974 ubfx r9, rINST, #8, #4 @ r9<- A 3975 GET_VREG(r0, r3) @ r0<- vB 3976 @ optional op; may set condition codes 3977 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3978 uxth r0, r0 @ r0<- op, r0-r3 changed 3979 GET_INST_OPCODE(ip) @ extract opcode from rINST 3980 SET_VREG(r0, r9) @ vAA<- r0 3981 GOTO_OPCODE(ip) @ jump to next instruction 3982 /* 8-9 instructions */ 3983 3984 3985/* ------------------------------ */ 3986 .balign 64 3987.L_OP_INT_TO_SHORT: /* 0x8f */ 3988/* File: armv6t2/OP_INT_TO_SHORT.S */ 3989/* File: armv6t2/unop.S */ 3990 /* 3991 * Generic 32-bit unary operation. Provide an "instr" line that 3992 * specifies an instruction that performs "result = op r0". 3993 * This could be an ARM instruction or a function call. 3994 * 3995 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3996 * int-to-byte, int-to-char, int-to-short 3997 */ 3998 /* unop vA, vB */ 3999 mov r3, rINST, lsr #12 @ r3<- B 4000 ubfx r9, rINST, #8, #4 @ r9<- A 4001 GET_VREG(r0, r3) @ r0<- vB 4002 @ optional op; may set condition codes 4003 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4004 sxth r0, r0 @ r0<- op, r0-r3 changed 4005 GET_INST_OPCODE(ip) @ extract opcode from rINST 4006 SET_VREG(r0, r9) @ vAA<- r0 4007 GOTO_OPCODE(ip) @ jump to next instruction 4008 /* 8-9 instructions */ 4009 4010 4011/* ------------------------------ */ 4012 .balign 64 4013.L_OP_ADD_INT: /* 0x90 */ 4014/* File: armv5te/OP_ADD_INT.S */ 4015/* File: armv5te/binop.S */ 4016 /* 4017 * Generic 32-bit binary operation. Provide an "instr" line that 4018 * specifies an instruction that performs "result = r0 op r1". 4019 * This could be an ARM instruction or a function call. (If the result 4020 * comes back in a register other than r0, you can override "result".) 4021 * 4022 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4023 * vCC (r1). Useful for integer division and modulus. Note that we 4024 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4025 * handles it correctly. 4026 * 4027 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4028 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4029 * mul-float, div-float, rem-float 4030 */ 4031 /* binop vAA, vBB, vCC */ 4032 FETCH(r0, 1) @ r0<- CCBB 4033 mov r9, rINST, lsr #8 @ r9<- AA 4034 mov r3, r0, lsr #8 @ r3<- CC 4035 and r2, r0, #255 @ r2<- BB 4036 GET_VREG(r1, r3) @ r1<- vCC 4037 GET_VREG(r0, r2) @ r0<- vBB 4038 .if 0 4039 cmp r1, #0 @ is second operand zero? 4040 beq common_errDivideByZero 4041 .endif 4042 4043 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4044 @ optional op; may set condition codes 4045 add r0, r0, r1 @ r0<- op, r0-r3 changed 4046 GET_INST_OPCODE(ip) @ extract opcode from rINST 4047 SET_VREG(r0, r9) @ vAA<- r0 4048 GOTO_OPCODE(ip) @ jump to next instruction 4049 /* 11-14 instructions */ 4050 4051 4052/* ------------------------------ */ 4053 .balign 64 4054.L_OP_SUB_INT: /* 0x91 */ 4055/* File: armv5te/OP_SUB_INT.S */ 4056/* File: armv5te/binop.S */ 4057 /* 4058 * Generic 32-bit binary operation. Provide an "instr" line that 4059 * specifies an instruction that performs "result = r0 op r1". 4060 * This could be an ARM instruction or a function call. (If the result 4061 * comes back in a register other than r0, you can override "result".) 4062 * 4063 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4064 * vCC (r1). Useful for integer division and modulus. Note that we 4065 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4066 * handles it correctly. 4067 * 4068 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4069 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4070 * mul-float, div-float, rem-float 4071 */ 4072 /* binop vAA, vBB, vCC */ 4073 FETCH(r0, 1) @ r0<- CCBB 4074 mov r9, rINST, lsr #8 @ r9<- AA 4075 mov r3, r0, lsr #8 @ r3<- CC 4076 and r2, r0, #255 @ r2<- BB 4077 GET_VREG(r1, r3) @ r1<- vCC 4078 GET_VREG(r0, r2) @ r0<- vBB 4079 .if 0 4080 cmp r1, #0 @ is second operand zero? 4081 beq common_errDivideByZero 4082 .endif 4083 4084 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4085 @ optional op; may set condition codes 4086 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4087 GET_INST_OPCODE(ip) @ extract opcode from rINST 4088 SET_VREG(r0, r9) @ vAA<- r0 4089 GOTO_OPCODE(ip) @ jump to next instruction 4090 /* 11-14 instructions */ 4091 4092 4093/* ------------------------------ */ 4094 .balign 64 4095.L_OP_MUL_INT: /* 0x92 */ 4096/* File: armv5te/OP_MUL_INT.S */ 4097/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4098/* File: armv5te/binop.S */ 4099 /* 4100 * Generic 32-bit binary operation. Provide an "instr" line that 4101 * specifies an instruction that performs "result = r0 op r1". 4102 * This could be an ARM instruction or a function call. (If the result 4103 * comes back in a register other than r0, you can override "result".) 4104 * 4105 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4106 * vCC (r1). Useful for integer division and modulus. Note that we 4107 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4108 * handles it correctly. 4109 * 4110 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4111 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4112 * mul-float, div-float, rem-float 4113 */ 4114 /* binop vAA, vBB, vCC */ 4115 FETCH(r0, 1) @ r0<- CCBB 4116 mov r9, rINST, lsr #8 @ r9<- AA 4117 mov r3, r0, lsr #8 @ r3<- CC 4118 and r2, r0, #255 @ r2<- BB 4119 GET_VREG(r1, r3) @ r1<- vCC 4120 GET_VREG(r0, r2) @ r0<- vBB 4121 .if 0 4122 cmp r1, #0 @ is second operand zero? 4123 beq common_errDivideByZero 4124 .endif 4125 4126 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4127 @ optional op; may set condition codes 4128 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4129 GET_INST_OPCODE(ip) @ extract opcode from rINST 4130 SET_VREG(r0, r9) @ vAA<- r0 4131 GOTO_OPCODE(ip) @ jump to next instruction 4132 /* 11-14 instructions */ 4133 4134 4135/* ------------------------------ */ 4136 .balign 64 4137.L_OP_DIV_INT: /* 0x93 */ 4138/* File: armv5te/OP_DIV_INT.S */ 4139/* File: armv5te/binop.S */ 4140 /* 4141 * Generic 32-bit binary operation. Provide an "instr" line that 4142 * specifies an instruction that performs "result = r0 op r1". 4143 * This could be an ARM instruction or a function call. (If the result 4144 * comes back in a register other than r0, you can override "result".) 4145 * 4146 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4147 * vCC (r1). Useful for integer division and modulus. Note that we 4148 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4149 * handles it correctly. 4150 * 4151 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4152 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4153 * mul-float, div-float, rem-float 4154 */ 4155 /* binop vAA, vBB, vCC */ 4156 FETCH(r0, 1) @ r0<- CCBB 4157 mov r9, rINST, lsr #8 @ r9<- AA 4158 mov r3, r0, lsr #8 @ r3<- CC 4159 and r2, r0, #255 @ r2<- BB 4160 GET_VREG(r1, r3) @ r1<- vCC 4161 GET_VREG(r0, r2) @ r0<- vBB 4162 .if 1 4163 cmp r1, #0 @ is second operand zero? 4164 beq common_errDivideByZero 4165 .endif 4166 4167 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4168 @ optional op; may set condition codes 4169 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4170 GET_INST_OPCODE(ip) @ extract opcode from rINST 4171 SET_VREG(r0, r9) @ vAA<- r0 4172 GOTO_OPCODE(ip) @ jump to next instruction 4173 /* 11-14 instructions */ 4174 4175 4176/* ------------------------------ */ 4177 .balign 64 4178.L_OP_REM_INT: /* 0x94 */ 4179/* File: armv5te/OP_REM_INT.S */ 4180/* idivmod returns quotient in r0 and remainder in r1 */ 4181/* File: armv5te/binop.S */ 4182 /* 4183 * Generic 32-bit binary operation. Provide an "instr" line that 4184 * specifies an instruction that performs "result = r0 op r1". 4185 * This could be an ARM instruction or a function call. (If the result 4186 * comes back in a register other than r0, you can override "result".) 4187 * 4188 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4189 * vCC (r1). Useful for integer division and modulus. Note that we 4190 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4191 * handles it correctly. 4192 * 4193 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4194 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4195 * mul-float, div-float, rem-float 4196 */ 4197 /* binop vAA, vBB, vCC */ 4198 FETCH(r0, 1) @ r0<- CCBB 4199 mov r9, rINST, lsr #8 @ r9<- AA 4200 mov r3, r0, lsr #8 @ r3<- CC 4201 and r2, r0, #255 @ r2<- BB 4202 GET_VREG(r1, r3) @ r1<- vCC 4203 GET_VREG(r0, r2) @ r0<- vBB 4204 .if 1 4205 cmp r1, #0 @ is second operand zero? 4206 beq common_errDivideByZero 4207 .endif 4208 4209 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4210 @ optional op; may set condition codes 4211 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4212 GET_INST_OPCODE(ip) @ extract opcode from rINST 4213 SET_VREG(r1, r9) @ vAA<- r1 4214 GOTO_OPCODE(ip) @ jump to next instruction 4215 /* 11-14 instructions */ 4216 4217 4218/* ------------------------------ */ 4219 .balign 64 4220.L_OP_AND_INT: /* 0x95 */ 4221/* File: armv5te/OP_AND_INT.S */ 4222/* File: armv5te/binop.S */ 4223 /* 4224 * Generic 32-bit binary operation. Provide an "instr" line that 4225 * specifies an instruction that performs "result = r0 op r1". 4226 * This could be an ARM instruction or a function call. (If the result 4227 * comes back in a register other than r0, you can override "result".) 4228 * 4229 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4230 * vCC (r1). Useful for integer division and modulus. Note that we 4231 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4232 * handles it correctly. 4233 * 4234 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4235 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4236 * mul-float, div-float, rem-float 4237 */ 4238 /* binop vAA, vBB, vCC */ 4239 FETCH(r0, 1) @ r0<- CCBB 4240 mov r9, rINST, lsr #8 @ r9<- AA 4241 mov r3, r0, lsr #8 @ r3<- CC 4242 and r2, r0, #255 @ r2<- BB 4243 GET_VREG(r1, r3) @ r1<- vCC 4244 GET_VREG(r0, r2) @ r0<- vBB 4245 .if 0 4246 cmp r1, #0 @ is second operand zero? 4247 beq common_errDivideByZero 4248 .endif 4249 4250 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4251 @ optional op; may set condition codes 4252 and r0, r0, r1 @ r0<- op, r0-r3 changed 4253 GET_INST_OPCODE(ip) @ extract opcode from rINST 4254 SET_VREG(r0, r9) @ vAA<- r0 4255 GOTO_OPCODE(ip) @ jump to next instruction 4256 /* 11-14 instructions */ 4257 4258 4259/* ------------------------------ */ 4260 .balign 64 4261.L_OP_OR_INT: /* 0x96 */ 4262/* File: armv5te/OP_OR_INT.S */ 4263/* File: armv5te/binop.S */ 4264 /* 4265 * Generic 32-bit binary operation. Provide an "instr" line that 4266 * specifies an instruction that performs "result = r0 op r1". 4267 * This could be an ARM instruction or a function call. (If the result 4268 * comes back in a register other than r0, you can override "result".) 4269 * 4270 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4271 * vCC (r1). Useful for integer division and modulus. Note that we 4272 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4273 * handles it correctly. 4274 * 4275 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4276 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4277 * mul-float, div-float, rem-float 4278 */ 4279 /* binop vAA, vBB, vCC */ 4280 FETCH(r0, 1) @ r0<- CCBB 4281 mov r9, rINST, lsr #8 @ r9<- AA 4282 mov r3, r0, lsr #8 @ r3<- CC 4283 and r2, r0, #255 @ r2<- BB 4284 GET_VREG(r1, r3) @ r1<- vCC 4285 GET_VREG(r0, r2) @ r0<- vBB 4286 .if 0 4287 cmp r1, #0 @ is second operand zero? 4288 beq common_errDivideByZero 4289 .endif 4290 4291 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4292 @ optional op; may set condition codes 4293 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4294 GET_INST_OPCODE(ip) @ extract opcode from rINST 4295 SET_VREG(r0, r9) @ vAA<- r0 4296 GOTO_OPCODE(ip) @ jump to next instruction 4297 /* 11-14 instructions */ 4298 4299 4300/* ------------------------------ */ 4301 .balign 64 4302.L_OP_XOR_INT: /* 0x97 */ 4303/* File: armv5te/OP_XOR_INT.S */ 4304/* File: armv5te/binop.S */ 4305 /* 4306 * Generic 32-bit binary operation. Provide an "instr" line that 4307 * specifies an instruction that performs "result = r0 op r1". 4308 * This could be an ARM instruction or a function call. (If the result 4309 * comes back in a register other than r0, you can override "result".) 4310 * 4311 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4312 * vCC (r1). Useful for integer division and modulus. Note that we 4313 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4314 * handles it correctly. 4315 * 4316 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4317 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4318 * mul-float, div-float, rem-float 4319 */ 4320 /* binop vAA, vBB, vCC */ 4321 FETCH(r0, 1) @ r0<- CCBB 4322 mov r9, rINST, lsr #8 @ r9<- AA 4323 mov r3, r0, lsr #8 @ r3<- CC 4324 and r2, r0, #255 @ r2<- BB 4325 GET_VREG(r1, r3) @ r1<- vCC 4326 GET_VREG(r0, r2) @ r0<- vBB 4327 .if 0 4328 cmp r1, #0 @ is second operand zero? 4329 beq common_errDivideByZero 4330 .endif 4331 4332 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4333 @ optional op; may set condition codes 4334 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4335 GET_INST_OPCODE(ip) @ extract opcode from rINST 4336 SET_VREG(r0, r9) @ vAA<- r0 4337 GOTO_OPCODE(ip) @ jump to next instruction 4338 /* 11-14 instructions */ 4339 4340 4341/* ------------------------------ */ 4342 .balign 64 4343.L_OP_SHL_INT: /* 0x98 */ 4344/* File: armv5te/OP_SHL_INT.S */ 4345/* File: armv5te/binop.S */ 4346 /* 4347 * Generic 32-bit binary operation. Provide an "instr" line that 4348 * specifies an instruction that performs "result = r0 op r1". 4349 * This could be an ARM instruction or a function call. (If the result 4350 * comes back in a register other than r0, you can override "result".) 4351 * 4352 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4353 * vCC (r1). Useful for integer division and modulus. Note that we 4354 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4355 * handles it correctly. 4356 * 4357 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4358 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4359 * mul-float, div-float, rem-float 4360 */ 4361 /* binop vAA, vBB, vCC */ 4362 FETCH(r0, 1) @ r0<- CCBB 4363 mov r9, rINST, lsr #8 @ r9<- AA 4364 mov r3, r0, lsr #8 @ r3<- CC 4365 and r2, r0, #255 @ r2<- BB 4366 GET_VREG(r1, r3) @ r1<- vCC 4367 GET_VREG(r0, r2) @ r0<- vBB 4368 .if 0 4369 cmp r1, #0 @ is second operand zero? 4370 beq common_errDivideByZero 4371 .endif 4372 4373 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4374 and r1, r1, #31 @ optional op; may set condition codes 4375 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 4376 GET_INST_OPCODE(ip) @ extract opcode from rINST 4377 SET_VREG(r0, r9) @ vAA<- r0 4378 GOTO_OPCODE(ip) @ jump to next instruction 4379 /* 11-14 instructions */ 4380 4381 4382/* ------------------------------ */ 4383 .balign 64 4384.L_OP_SHR_INT: /* 0x99 */ 4385/* File: armv5te/OP_SHR_INT.S */ 4386/* File: armv5te/binop.S */ 4387 /* 4388 * Generic 32-bit binary operation. Provide an "instr" line that 4389 * specifies an instruction that performs "result = r0 op r1". 4390 * This could be an ARM instruction or a function call. (If the result 4391 * comes back in a register other than r0, you can override "result".) 4392 * 4393 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4394 * vCC (r1). Useful for integer division and modulus. Note that we 4395 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4396 * handles it correctly. 4397 * 4398 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4399 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4400 * mul-float, div-float, rem-float 4401 */ 4402 /* binop vAA, vBB, vCC */ 4403 FETCH(r0, 1) @ r0<- CCBB 4404 mov r9, rINST, lsr #8 @ r9<- AA 4405 mov r3, r0, lsr #8 @ r3<- CC 4406 and r2, r0, #255 @ r2<- BB 4407 GET_VREG(r1, r3) @ r1<- vCC 4408 GET_VREG(r0, r2) @ r0<- vBB 4409 .if 0 4410 cmp r1, #0 @ is second operand zero? 4411 beq common_errDivideByZero 4412 .endif 4413 4414 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4415 and r1, r1, #31 @ optional op; may set condition codes 4416 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 4417 GET_INST_OPCODE(ip) @ extract opcode from rINST 4418 SET_VREG(r0, r9) @ vAA<- r0 4419 GOTO_OPCODE(ip) @ jump to next instruction 4420 /* 11-14 instructions */ 4421 4422 4423/* ------------------------------ */ 4424 .balign 64 4425.L_OP_USHR_INT: /* 0x9a */ 4426/* File: armv5te/OP_USHR_INT.S */ 4427/* File: armv5te/binop.S */ 4428 /* 4429 * Generic 32-bit binary operation. Provide an "instr" line that 4430 * specifies an instruction that performs "result = r0 op r1". 4431 * This could be an ARM instruction or a function call. (If the result 4432 * comes back in a register other than r0, you can override "result".) 4433 * 4434 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4435 * vCC (r1). Useful for integer division and modulus. Note that we 4436 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4437 * handles it correctly. 4438 * 4439 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4440 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4441 * mul-float, div-float, rem-float 4442 */ 4443 /* binop vAA, vBB, vCC */ 4444 FETCH(r0, 1) @ r0<- CCBB 4445 mov r9, rINST, lsr #8 @ r9<- AA 4446 mov r3, r0, lsr #8 @ r3<- CC 4447 and r2, r0, #255 @ r2<- BB 4448 GET_VREG(r1, r3) @ r1<- vCC 4449 GET_VREG(r0, r2) @ r0<- vBB 4450 .if 0 4451 cmp r1, #0 @ is second operand zero? 4452 beq common_errDivideByZero 4453 .endif 4454 4455 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4456 and r1, r1, #31 @ optional op; may set condition codes 4457 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 4458 GET_INST_OPCODE(ip) @ extract opcode from rINST 4459 SET_VREG(r0, r9) @ vAA<- r0 4460 GOTO_OPCODE(ip) @ jump to next instruction 4461 /* 11-14 instructions */ 4462 4463 4464/* ------------------------------ */ 4465 .balign 64 4466.L_OP_ADD_LONG: /* 0x9b */ 4467/* File: armv5te/OP_ADD_LONG.S */ 4468/* File: armv5te/binopWide.S */ 4469 /* 4470 * Generic 64-bit binary operation. Provide an "instr" line that 4471 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4472 * This could be an ARM instruction or a function call. (If the result 4473 * comes back in a register other than r0, you can override "result".) 4474 * 4475 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4476 * vCC (r1). Useful for integer division and modulus. 4477 * 4478 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4479 * xor-long, add-double, sub-double, mul-double, div-double, 4480 * rem-double 4481 * 4482 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4483 */ 4484 /* binop vAA, vBB, vCC */ 4485 FETCH(r0, 1) @ r0<- CCBB 4486 mov r9, rINST, lsr #8 @ r9<- AA 4487 and r2, r0, #255 @ r2<- BB 4488 mov r3, r0, lsr #8 @ r3<- CC 4489 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4490 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4491 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4492 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4493 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4494 .if 0 4495 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4496 beq common_errDivideByZero 4497 .endif 4498 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4499 4500 adds r0, r0, r2 @ optional op; may set condition codes 4501 adc r1, r1, r3 @ result<- op, r0-r3 changed 4502 GET_INST_OPCODE(ip) @ extract opcode from rINST 4503 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4504 GOTO_OPCODE(ip) @ jump to next instruction 4505 /* 14-17 instructions */ 4506 4507 4508/* ------------------------------ */ 4509 .balign 64 4510.L_OP_SUB_LONG: /* 0x9c */ 4511/* File: armv5te/OP_SUB_LONG.S */ 4512/* File: armv5te/binopWide.S */ 4513 /* 4514 * Generic 64-bit binary operation. Provide an "instr" line that 4515 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4516 * This could be an ARM instruction or a function call. (If the result 4517 * comes back in a register other than r0, you can override "result".) 4518 * 4519 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4520 * vCC (r1). Useful for integer division and modulus. 4521 * 4522 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4523 * xor-long, add-double, sub-double, mul-double, div-double, 4524 * rem-double 4525 * 4526 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4527 */ 4528 /* binop vAA, vBB, vCC */ 4529 FETCH(r0, 1) @ r0<- CCBB 4530 mov r9, rINST, lsr #8 @ r9<- AA 4531 and r2, r0, #255 @ r2<- BB 4532 mov r3, r0, lsr #8 @ r3<- CC 4533 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4534 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4535 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4536 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4537 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4538 .if 0 4539 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4540 beq common_errDivideByZero 4541 .endif 4542 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4543 4544 subs r0, r0, r2 @ optional op; may set condition codes 4545 sbc r1, r1, r3 @ result<- op, r0-r3 changed 4546 GET_INST_OPCODE(ip) @ extract opcode from rINST 4547 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4548 GOTO_OPCODE(ip) @ jump to next instruction 4549 /* 14-17 instructions */ 4550 4551 4552/* ------------------------------ */ 4553 .balign 64 4554.L_OP_MUL_LONG: /* 0x9d */ 4555/* File: armv5te/OP_MUL_LONG.S */ 4556 /* 4557 * Signed 64-bit integer multiply. 4558 * 4559 * Consider WXxYZ (r1r0 x r3r2) with a long multiply: 4560 * WX 4561 * x YZ 4562 * -------- 4563 * ZW ZX 4564 * YW YX 4565 * 4566 * The low word of the result holds ZX, the high word holds 4567 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because 4568 * it doesn't fit in the low 64 bits. 4569 * 4570 * Unlike most ARM math operations, multiply instructions have 4571 * restrictions on using the same register more than once (Rd and Rm 4572 * cannot be the same). 4573 */ 4574 /* mul-long vAA, vBB, vCC */ 4575 FETCH(r0, 1) @ r0<- CCBB 4576 and r2, r0, #255 @ r2<- BB 4577 mov r3, r0, lsr #8 @ r3<- CC 4578 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4579 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4580 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4581 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4582 mul ip, r2, r1 @ ip<- ZxW 4583 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 4584 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 4585 mov r0, rINST, lsr #8 @ r0<- AA 4586 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 4587 add r0, rFP, r0, lsl #2 @ r0<- &fp[AA] 4588 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4589 b .LOP_MUL_LONG_finish 4590 4591/* ------------------------------ */ 4592 .balign 64 4593.L_OP_DIV_LONG: /* 0x9e */ 4594/* File: armv5te/OP_DIV_LONG.S */ 4595/* File: armv5te/binopWide.S */ 4596 /* 4597 * Generic 64-bit binary operation. Provide an "instr" line that 4598 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4599 * This could be an ARM instruction or a function call. (If the result 4600 * comes back in a register other than r0, you can override "result".) 4601 * 4602 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4603 * vCC (r1). Useful for integer division and modulus. 4604 * 4605 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4606 * xor-long, add-double, sub-double, mul-double, div-double, 4607 * rem-double 4608 * 4609 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4610 */ 4611 /* binop vAA, vBB, vCC */ 4612 FETCH(r0, 1) @ r0<- CCBB 4613 mov r9, rINST, lsr #8 @ r9<- AA 4614 and r2, r0, #255 @ r2<- BB 4615 mov r3, r0, lsr #8 @ r3<- CC 4616 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4617 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4618 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4619 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4620 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4621 .if 1 4622 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4623 beq common_errDivideByZero 4624 .endif 4625 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4626 4627 @ optional op; may set condition codes 4628 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4629 GET_INST_OPCODE(ip) @ extract opcode from rINST 4630 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4631 GOTO_OPCODE(ip) @ jump to next instruction 4632 /* 14-17 instructions */ 4633 4634 4635/* ------------------------------ */ 4636 .balign 64 4637.L_OP_REM_LONG: /* 0x9f */ 4638/* File: armv5te/OP_REM_LONG.S */ 4639/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 4640/* File: armv5te/binopWide.S */ 4641 /* 4642 * Generic 64-bit binary operation. Provide an "instr" line that 4643 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4644 * This could be an ARM instruction or a function call. (If the result 4645 * comes back in a register other than r0, you can override "result".) 4646 * 4647 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4648 * vCC (r1). Useful for integer division and modulus. 4649 * 4650 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4651 * xor-long, add-double, sub-double, mul-double, div-double, 4652 * rem-double 4653 * 4654 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4655 */ 4656 /* binop vAA, vBB, vCC */ 4657 FETCH(r0, 1) @ r0<- CCBB 4658 mov r9, rINST, lsr #8 @ r9<- AA 4659 and r2, r0, #255 @ r2<- BB 4660 mov r3, r0, lsr #8 @ r3<- CC 4661 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4662 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4663 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4664 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4665 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4666 .if 1 4667 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4668 beq common_errDivideByZero 4669 .endif 4670 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4671 4672 @ optional op; may set condition codes 4673 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4674 GET_INST_OPCODE(ip) @ extract opcode from rINST 4675 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 4676 GOTO_OPCODE(ip) @ jump to next instruction 4677 /* 14-17 instructions */ 4678 4679 4680/* ------------------------------ */ 4681 .balign 64 4682.L_OP_AND_LONG: /* 0xa0 */ 4683/* File: armv5te/OP_AND_LONG.S */ 4684/* File: armv5te/binopWide.S */ 4685 /* 4686 * Generic 64-bit binary operation. Provide an "instr" line that 4687 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4688 * This could be an ARM instruction or a function call. (If the result 4689 * comes back in a register other than r0, you can override "result".) 4690 * 4691 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4692 * vCC (r1). Useful for integer division and modulus. 4693 * 4694 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4695 * xor-long, add-double, sub-double, mul-double, div-double, 4696 * rem-double 4697 * 4698 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4699 */ 4700 /* binop vAA, vBB, vCC */ 4701 FETCH(r0, 1) @ r0<- CCBB 4702 mov r9, rINST, lsr #8 @ r9<- AA 4703 and r2, r0, #255 @ r2<- BB 4704 mov r3, r0, lsr #8 @ r3<- CC 4705 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4706 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4707 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4708 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4709 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4710 .if 0 4711 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4712 beq common_errDivideByZero 4713 .endif 4714 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4715 4716 and r0, r0, r2 @ optional op; may set condition codes 4717 and r1, r1, r3 @ result<- op, r0-r3 changed 4718 GET_INST_OPCODE(ip) @ extract opcode from rINST 4719 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4720 GOTO_OPCODE(ip) @ jump to next instruction 4721 /* 14-17 instructions */ 4722 4723 4724/* ------------------------------ */ 4725 .balign 64 4726.L_OP_OR_LONG: /* 0xa1 */ 4727/* File: armv5te/OP_OR_LONG.S */ 4728/* File: armv5te/binopWide.S */ 4729 /* 4730 * Generic 64-bit binary operation. Provide an "instr" line that 4731 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4732 * This could be an ARM instruction or a function call. (If the result 4733 * comes back in a register other than r0, you can override "result".) 4734 * 4735 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4736 * vCC (r1). Useful for integer division and modulus. 4737 * 4738 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4739 * xor-long, add-double, sub-double, mul-double, div-double, 4740 * rem-double 4741 * 4742 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4743 */ 4744 /* binop vAA, vBB, vCC */ 4745 FETCH(r0, 1) @ r0<- CCBB 4746 mov r9, rINST, lsr #8 @ r9<- AA 4747 and r2, r0, #255 @ r2<- BB 4748 mov r3, r0, lsr #8 @ r3<- CC 4749 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4750 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4751 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4752 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4753 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4754 .if 0 4755 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4756 beq common_errDivideByZero 4757 .endif 4758 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4759 4760 orr r0, r0, r2 @ optional op; may set condition codes 4761 orr r1, r1, r3 @ result<- op, r0-r3 changed 4762 GET_INST_OPCODE(ip) @ extract opcode from rINST 4763 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4764 GOTO_OPCODE(ip) @ jump to next instruction 4765 /* 14-17 instructions */ 4766 4767 4768/* ------------------------------ */ 4769 .balign 64 4770.L_OP_XOR_LONG: /* 0xa2 */ 4771/* File: armv5te/OP_XOR_LONG.S */ 4772/* File: armv5te/binopWide.S */ 4773 /* 4774 * Generic 64-bit binary operation. Provide an "instr" line that 4775 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4776 * This could be an ARM instruction or a function call. (If the result 4777 * comes back in a register other than r0, you can override "result".) 4778 * 4779 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4780 * vCC (r1). Useful for integer division and modulus. 4781 * 4782 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4783 * xor-long, add-double, sub-double, mul-double, div-double, 4784 * rem-double 4785 * 4786 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4787 */ 4788 /* binop vAA, vBB, vCC */ 4789 FETCH(r0, 1) @ r0<- CCBB 4790 mov r9, rINST, lsr #8 @ r9<- AA 4791 and r2, r0, #255 @ r2<- BB 4792 mov r3, r0, lsr #8 @ r3<- CC 4793 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4794 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4795 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4796 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4797 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4798 .if 0 4799 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4800 beq common_errDivideByZero 4801 .endif 4802 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4803 4804 eor r0, r0, r2 @ optional op; may set condition codes 4805 eor r1, r1, r3 @ result<- op, r0-r3 changed 4806 GET_INST_OPCODE(ip) @ extract opcode from rINST 4807 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4808 GOTO_OPCODE(ip) @ jump to next instruction 4809 /* 14-17 instructions */ 4810 4811 4812/* ------------------------------ */ 4813 .balign 64 4814.L_OP_SHL_LONG: /* 0xa3 */ 4815/* File: armv5te/OP_SHL_LONG.S */ 4816 /* 4817 * Long integer shift. This is different from the generic 32/64-bit 4818 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4819 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4820 * 6 bits of the shift distance. 4821 */ 4822 /* shl-long vAA, vBB, vCC */ 4823 FETCH(r0, 1) @ r0<- CCBB 4824 mov r9, rINST, lsr #8 @ r9<- AA 4825 and r3, r0, #255 @ r3<- BB 4826 mov r0, r0, lsr #8 @ r0<- CC 4827 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 4828 GET_VREG(r2, r0) @ r2<- vCC 4829 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 4830 and r2, r2, #63 @ r2<- r2 & 0x3f 4831 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4832 4833 mov r1, r1, asl r2 @ r1<- r1 << r2 4834 rsb r3, r2, #32 @ r3<- 32 - r2 4835 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 4836 subs ip, r2, #32 @ ip<- r2 - 32 4837 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 4838 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4839 b .LOP_SHL_LONG_finish 4840 4841/* ------------------------------ */ 4842 .balign 64 4843.L_OP_SHR_LONG: /* 0xa4 */ 4844/* File: armv5te/OP_SHR_LONG.S */ 4845 /* 4846 * Long integer shift. This is different from the generic 32/64-bit 4847 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4848 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4849 * 6 bits of the shift distance. 4850 */ 4851 /* shr-long vAA, vBB, vCC */ 4852 FETCH(r0, 1) @ r0<- CCBB 4853 mov r9, rINST, lsr #8 @ r9<- AA 4854 and r3, r0, #255 @ r3<- BB 4855 mov r0, r0, lsr #8 @ r0<- CC 4856 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 4857 GET_VREG(r2, r0) @ r2<- vCC 4858 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 4859 and r2, r2, #63 @ r0<- r0 & 0x3f 4860 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4861 4862 mov r0, r0, lsr r2 @ r0<- r2 >> r2 4863 rsb r3, r2, #32 @ r3<- 32 - r2 4864 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 4865 subs ip, r2, #32 @ ip<- r2 - 32 4866 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 4867 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4868 b .LOP_SHR_LONG_finish 4869 4870/* ------------------------------ */ 4871 .balign 64 4872.L_OP_USHR_LONG: /* 0xa5 */ 4873/* File: armv5te/OP_USHR_LONG.S */ 4874 /* 4875 * Long integer shift. This is different from the generic 32/64-bit 4876 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4877 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4878 * 6 bits of the shift distance. 4879 */ 4880 /* ushr-long vAA, vBB, vCC */ 4881 FETCH(r0, 1) @ r0<- CCBB 4882 mov r9, rINST, lsr #8 @ r9<- AA 4883 and r3, r0, #255 @ r3<- BB 4884 mov r0, r0, lsr #8 @ r0<- CC 4885 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 4886 GET_VREG(r2, r0) @ r2<- vCC 4887 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 4888 and r2, r2, #63 @ r0<- r0 & 0x3f 4889 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4890 4891 mov r0, r0, lsr r2 @ r0<- r2 >> r2 4892 rsb r3, r2, #32 @ r3<- 32 - r2 4893 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 4894 subs ip, r2, #32 @ ip<- r2 - 32 4895 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 4896 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4897 b .LOP_USHR_LONG_finish 4898 4899/* ------------------------------ */ 4900 .balign 64 4901.L_OP_ADD_FLOAT: /* 0xa6 */ 4902/* File: arm-vfp/OP_ADD_FLOAT.S */ 4903/* File: arm-vfp/fbinop.S */ 4904 /* 4905 * Generic 32-bit floating-point operation. Provide an "instr" line that 4906 * specifies an instruction that performs "s2 = s0 op s1". Because we 4907 * use the "softfp" ABI, this must be an instruction, not a function call. 4908 * 4909 * For: add-float, sub-float, mul-float, div-float 4910 */ 4911 /* floatop vAA, vBB, vCC */ 4912 FETCH(r0, 1) @ r0<- CCBB 4913 mov r9, rINST, lsr #8 @ r9<- AA 4914 mov r3, r0, lsr #8 @ r3<- CC 4915 and r2, r0, #255 @ r2<- BB 4916 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 4917 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 4918 flds s1, [r3] @ s1<- vCC 4919 flds s0, [r2] @ s0<- vBB 4920 4921 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4922 fadds s2, s0, s1 @ s2<- op 4923 GET_INST_OPCODE(ip) @ extract opcode from rINST 4924 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 4925 fsts s2, [r9] @ vAA<- s2 4926 GOTO_OPCODE(ip) @ jump to next instruction 4927 4928 4929/* ------------------------------ */ 4930 .balign 64 4931.L_OP_SUB_FLOAT: /* 0xa7 */ 4932/* File: arm-vfp/OP_SUB_FLOAT.S */ 4933/* File: arm-vfp/fbinop.S */ 4934 /* 4935 * Generic 32-bit floating-point operation. Provide an "instr" line that 4936 * specifies an instruction that performs "s2 = s0 op s1". Because we 4937 * use the "softfp" ABI, this must be an instruction, not a function call. 4938 * 4939 * For: add-float, sub-float, mul-float, div-float 4940 */ 4941 /* floatop vAA, vBB, vCC */ 4942 FETCH(r0, 1) @ r0<- CCBB 4943 mov r9, rINST, lsr #8 @ r9<- AA 4944 mov r3, r0, lsr #8 @ r3<- CC 4945 and r2, r0, #255 @ r2<- BB 4946 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 4947 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 4948 flds s1, [r3] @ s1<- vCC 4949 flds s0, [r2] @ s0<- vBB 4950 4951 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4952 fsubs s2, s0, s1 @ s2<- op 4953 GET_INST_OPCODE(ip) @ extract opcode from rINST 4954 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 4955 fsts s2, [r9] @ vAA<- s2 4956 GOTO_OPCODE(ip) @ jump to next instruction 4957 4958 4959/* ------------------------------ */ 4960 .balign 64 4961.L_OP_MUL_FLOAT: /* 0xa8 */ 4962/* File: arm-vfp/OP_MUL_FLOAT.S */ 4963/* File: arm-vfp/fbinop.S */ 4964 /* 4965 * Generic 32-bit floating-point operation. Provide an "instr" line that 4966 * specifies an instruction that performs "s2 = s0 op s1". Because we 4967 * use the "softfp" ABI, this must be an instruction, not a function call. 4968 * 4969 * For: add-float, sub-float, mul-float, div-float 4970 */ 4971 /* floatop vAA, vBB, vCC */ 4972 FETCH(r0, 1) @ r0<- CCBB 4973 mov r9, rINST, lsr #8 @ r9<- AA 4974 mov r3, r0, lsr #8 @ r3<- CC 4975 and r2, r0, #255 @ r2<- BB 4976 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 4977 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 4978 flds s1, [r3] @ s1<- vCC 4979 flds s0, [r2] @ s0<- vBB 4980 4981 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4982 fmuls s2, s0, s1 @ s2<- op 4983 GET_INST_OPCODE(ip) @ extract opcode from rINST 4984 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 4985 fsts s2, [r9] @ vAA<- s2 4986 GOTO_OPCODE(ip) @ jump to next instruction 4987 4988 4989/* ------------------------------ */ 4990 .balign 64 4991.L_OP_DIV_FLOAT: /* 0xa9 */ 4992/* File: arm-vfp/OP_DIV_FLOAT.S */ 4993/* File: arm-vfp/fbinop.S */ 4994 /* 4995 * Generic 32-bit floating-point operation. Provide an "instr" line that 4996 * specifies an instruction that performs "s2 = s0 op s1". Because we 4997 * use the "softfp" ABI, this must be an instruction, not a function call. 4998 * 4999 * For: add-float, sub-float, mul-float, div-float 5000 */ 5001 /* floatop vAA, vBB, vCC */ 5002 FETCH(r0, 1) @ r0<- CCBB 5003 mov r9, rINST, lsr #8 @ r9<- AA 5004 mov r3, r0, lsr #8 @ r3<- CC 5005 and r2, r0, #255 @ r2<- BB 5006 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5007 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5008 flds s1, [r3] @ s1<- vCC 5009 flds s0, [r2] @ s0<- vBB 5010 5011 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5012 fdivs s2, s0, s1 @ s2<- op 5013 GET_INST_OPCODE(ip) @ extract opcode from rINST 5014 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5015 fsts s2, [r9] @ vAA<- s2 5016 GOTO_OPCODE(ip) @ jump to next instruction 5017 5018 5019/* ------------------------------ */ 5020 .balign 64 5021.L_OP_REM_FLOAT: /* 0xaa */ 5022/* File: armv5te/OP_REM_FLOAT.S */ 5023/* EABI doesn't define a float remainder function, but libm does */ 5024/* File: armv5te/binop.S */ 5025 /* 5026 * Generic 32-bit binary operation. Provide an "instr" line that 5027 * specifies an instruction that performs "result = r0 op r1". 5028 * This could be an ARM instruction or a function call. (If the result 5029 * comes back in a register other than r0, you can override "result".) 5030 * 5031 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5032 * vCC (r1). Useful for integer division and modulus. Note that we 5033 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5034 * handles it correctly. 5035 * 5036 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5037 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5038 * mul-float, div-float, rem-float 5039 */ 5040 /* binop vAA, vBB, vCC */ 5041 FETCH(r0, 1) @ r0<- CCBB 5042 mov r9, rINST, lsr #8 @ r9<- AA 5043 mov r3, r0, lsr #8 @ r3<- CC 5044 and r2, r0, #255 @ r2<- BB 5045 GET_VREG(r1, r3) @ r1<- vCC 5046 GET_VREG(r0, r2) @ r0<- vBB 5047 .if 0 5048 cmp r1, #0 @ is second operand zero? 5049 beq common_errDivideByZero 5050 .endif 5051 5052 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5053 @ optional op; may set condition codes 5054 bl fmodf @ r0<- op, r0-r3 changed 5055 GET_INST_OPCODE(ip) @ extract opcode from rINST 5056 SET_VREG(r0, r9) @ vAA<- r0 5057 GOTO_OPCODE(ip) @ jump to next instruction 5058 /* 11-14 instructions */ 5059 5060 5061/* ------------------------------ */ 5062 .balign 64 5063.L_OP_ADD_DOUBLE: /* 0xab */ 5064/* File: arm-vfp/OP_ADD_DOUBLE.S */ 5065/* File: arm-vfp/fbinopWide.S */ 5066 /* 5067 * Generic 64-bit double-precision floating point binary operation. 5068 * Provide an "instr" line that specifies an instruction that performs 5069 * "d2 = d0 op d1". 5070 * 5071 * for: add-double, sub-double, mul-double, div-double 5072 */ 5073 /* doubleop vAA, vBB, vCC */ 5074 FETCH(r0, 1) @ r0<- CCBB 5075 mov r9, rINST, lsr #8 @ r9<- AA 5076 mov r3, r0, lsr #8 @ r3<- CC 5077 and r2, r0, #255 @ r2<- BB 5078 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5079 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5080 fldd d1, [r3] @ d1<- vCC 5081 fldd d0, [r2] @ d0<- vBB 5082 5083 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5084 faddd d2, d0, d1 @ s2<- op 5085 GET_INST_OPCODE(ip) @ extract opcode from rINST 5086 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5087 fstd d2, [r9] @ vAA<- d2 5088 GOTO_OPCODE(ip) @ jump to next instruction 5089 5090 5091/* ------------------------------ */ 5092 .balign 64 5093.L_OP_SUB_DOUBLE: /* 0xac */ 5094/* File: arm-vfp/OP_SUB_DOUBLE.S */ 5095/* File: arm-vfp/fbinopWide.S */ 5096 /* 5097 * Generic 64-bit double-precision floating point binary operation. 5098 * Provide an "instr" line that specifies an instruction that performs 5099 * "d2 = d0 op d1". 5100 * 5101 * for: add-double, sub-double, mul-double, div-double 5102 */ 5103 /* doubleop vAA, vBB, vCC */ 5104 FETCH(r0, 1) @ r0<- CCBB 5105 mov r9, rINST, lsr #8 @ r9<- AA 5106 mov r3, r0, lsr #8 @ r3<- CC 5107 and r2, r0, #255 @ r2<- BB 5108 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5109 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5110 fldd d1, [r3] @ d1<- vCC 5111 fldd d0, [r2] @ d0<- vBB 5112 5113 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5114 fsubd d2, d0, d1 @ s2<- op 5115 GET_INST_OPCODE(ip) @ extract opcode from rINST 5116 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5117 fstd d2, [r9] @ vAA<- d2 5118 GOTO_OPCODE(ip) @ jump to next instruction 5119 5120 5121/* ------------------------------ */ 5122 .balign 64 5123.L_OP_MUL_DOUBLE: /* 0xad */ 5124/* File: arm-vfp/OP_MUL_DOUBLE.S */ 5125/* File: arm-vfp/fbinopWide.S */ 5126 /* 5127 * Generic 64-bit double-precision floating point binary operation. 5128 * Provide an "instr" line that specifies an instruction that performs 5129 * "d2 = d0 op d1". 5130 * 5131 * for: add-double, sub-double, mul-double, div-double 5132 */ 5133 /* doubleop vAA, vBB, vCC */ 5134 FETCH(r0, 1) @ r0<- CCBB 5135 mov r9, rINST, lsr #8 @ r9<- AA 5136 mov r3, r0, lsr #8 @ r3<- CC 5137 and r2, r0, #255 @ r2<- BB 5138 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5139 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5140 fldd d1, [r3] @ d1<- vCC 5141 fldd d0, [r2] @ d0<- vBB 5142 5143 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5144 fmuld d2, d0, d1 @ s2<- op 5145 GET_INST_OPCODE(ip) @ extract opcode from rINST 5146 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5147 fstd d2, [r9] @ vAA<- d2 5148 GOTO_OPCODE(ip) @ jump to next instruction 5149 5150 5151/* ------------------------------ */ 5152 .balign 64 5153.L_OP_DIV_DOUBLE: /* 0xae */ 5154/* File: arm-vfp/OP_DIV_DOUBLE.S */ 5155/* File: arm-vfp/fbinopWide.S */ 5156 /* 5157 * Generic 64-bit double-precision floating point binary operation. 5158 * Provide an "instr" line that specifies an instruction that performs 5159 * "d2 = d0 op d1". 5160 * 5161 * for: add-double, sub-double, mul-double, div-double 5162 */ 5163 /* doubleop vAA, vBB, vCC */ 5164 FETCH(r0, 1) @ r0<- CCBB 5165 mov r9, rINST, lsr #8 @ r9<- AA 5166 mov r3, r0, lsr #8 @ r3<- CC 5167 and r2, r0, #255 @ r2<- BB 5168 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5169 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5170 fldd d1, [r3] @ d1<- vCC 5171 fldd d0, [r2] @ d0<- vBB 5172 5173 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5174 fdivd d2, d0, d1 @ s2<- op 5175 GET_INST_OPCODE(ip) @ extract opcode from rINST 5176 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5177 fstd d2, [r9] @ vAA<- d2 5178 GOTO_OPCODE(ip) @ jump to next instruction 5179 5180 5181/* ------------------------------ */ 5182 .balign 64 5183.L_OP_REM_DOUBLE: /* 0xaf */ 5184/* File: armv5te/OP_REM_DOUBLE.S */ 5185/* EABI doesn't define a double remainder function, but libm does */ 5186/* File: armv5te/binopWide.S */ 5187 /* 5188 * Generic 64-bit binary operation. Provide an "instr" line that 5189 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5190 * This could be an ARM instruction or a function call. (If the result 5191 * comes back in a register other than r0, you can override "result".) 5192 * 5193 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5194 * vCC (r1). Useful for integer division and modulus. 5195 * 5196 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5197 * xor-long, add-double, sub-double, mul-double, div-double, 5198 * rem-double 5199 * 5200 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5201 */ 5202 /* binop vAA, vBB, vCC */ 5203 FETCH(r0, 1) @ r0<- CCBB 5204 mov r9, rINST, lsr #8 @ r9<- AA 5205 and r2, r0, #255 @ r2<- BB 5206 mov r3, r0, lsr #8 @ r3<- CC 5207 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5208 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5209 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5210 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5211 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5212 .if 0 5213 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5214 beq common_errDivideByZero 5215 .endif 5216 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5217 5218 @ optional op; may set condition codes 5219 bl fmod @ result<- op, r0-r3 changed 5220 GET_INST_OPCODE(ip) @ extract opcode from rINST 5221 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5222 GOTO_OPCODE(ip) @ jump to next instruction 5223 /* 14-17 instructions */ 5224 5225 5226/* ------------------------------ */ 5227 .balign 64 5228.L_OP_ADD_INT_2ADDR: /* 0xb0 */ 5229/* File: armv6t2/OP_ADD_INT_2ADDR.S */ 5230/* File: armv6t2/binop2addr.S */ 5231 /* 5232 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5233 * that specifies an instruction that performs "result = r0 op r1". 5234 * This could be an ARM instruction or a function call. (If the result 5235 * comes back in a register other than r0, you can override "result".) 5236 * 5237 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5238 * vCC (r1). Useful for integer division and modulus. 5239 * 5240 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5241 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5242 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5243 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5244 */ 5245 /* binop/2addr vA, vB */ 5246 mov r3, rINST, lsr #12 @ r3<- B 5247 ubfx r9, rINST, #8, #4 @ r9<- A 5248 GET_VREG(r1, r3) @ r1<- vB 5249 GET_VREG(r0, r9) @ r0<- vA 5250 .if 0 5251 cmp r1, #0 @ is second operand zero? 5252 beq common_errDivideByZero 5253 .endif 5254 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5255 5256 @ optional op; may set condition codes 5257 add r0, r0, r1 @ r0<- op, r0-r3 changed 5258 GET_INST_OPCODE(ip) @ extract opcode from rINST 5259 SET_VREG(r0, r9) @ vAA<- r0 5260 GOTO_OPCODE(ip) @ jump to next instruction 5261 /* 10-13 instructions */ 5262 5263 5264/* ------------------------------ */ 5265 .balign 64 5266.L_OP_SUB_INT_2ADDR: /* 0xb1 */ 5267/* File: armv6t2/OP_SUB_INT_2ADDR.S */ 5268/* File: armv6t2/binop2addr.S */ 5269 /* 5270 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5271 * that specifies an instruction that performs "result = r0 op r1". 5272 * This could be an ARM instruction or a function call. (If the result 5273 * comes back in a register other than r0, you can override "result".) 5274 * 5275 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5276 * vCC (r1). Useful for integer division and modulus. 5277 * 5278 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5279 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5280 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5281 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5282 */ 5283 /* binop/2addr vA, vB */ 5284 mov r3, rINST, lsr #12 @ r3<- B 5285 ubfx r9, rINST, #8, #4 @ r9<- A 5286 GET_VREG(r1, r3) @ r1<- vB 5287 GET_VREG(r0, r9) @ r0<- vA 5288 .if 0 5289 cmp r1, #0 @ is second operand zero? 5290 beq common_errDivideByZero 5291 .endif 5292 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5293 5294 @ optional op; may set condition codes 5295 sub r0, r0, r1 @ r0<- op, r0-r3 changed 5296 GET_INST_OPCODE(ip) @ extract opcode from rINST 5297 SET_VREG(r0, r9) @ vAA<- r0 5298 GOTO_OPCODE(ip) @ jump to next instruction 5299 /* 10-13 instructions */ 5300 5301 5302/* ------------------------------ */ 5303 .balign 64 5304.L_OP_MUL_INT_2ADDR: /* 0xb2 */ 5305/* File: armv6t2/OP_MUL_INT_2ADDR.S */ 5306/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 5307/* File: armv6t2/binop2addr.S */ 5308 /* 5309 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5310 * that specifies an instruction that performs "result = r0 op r1". 5311 * This could be an ARM instruction or a function call. (If the result 5312 * comes back in a register other than r0, you can override "result".) 5313 * 5314 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5315 * vCC (r1). Useful for integer division and modulus. 5316 * 5317 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5318 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5319 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5320 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5321 */ 5322 /* binop/2addr vA, vB */ 5323 mov r3, rINST, lsr #12 @ r3<- B 5324 ubfx r9, rINST, #8, #4 @ r9<- A 5325 GET_VREG(r1, r3) @ r1<- vB 5326 GET_VREG(r0, r9) @ r0<- vA 5327 .if 0 5328 cmp r1, #0 @ is second operand zero? 5329 beq common_errDivideByZero 5330 .endif 5331 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5332 5333 @ optional op; may set condition codes 5334 mul r0, r1, r0 @ r0<- op, r0-r3 changed 5335 GET_INST_OPCODE(ip) @ extract opcode from rINST 5336 SET_VREG(r0, r9) @ vAA<- r0 5337 GOTO_OPCODE(ip) @ jump to next instruction 5338 /* 10-13 instructions */ 5339 5340 5341/* ------------------------------ */ 5342 .balign 64 5343.L_OP_DIV_INT_2ADDR: /* 0xb3 */ 5344/* File: armv6t2/OP_DIV_INT_2ADDR.S */ 5345/* File: armv6t2/binop2addr.S */ 5346 /* 5347 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5348 * that specifies an instruction that performs "result = r0 op r1". 5349 * This could be an ARM instruction or a function call. (If the result 5350 * comes back in a register other than r0, you can override "result".) 5351 * 5352 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5353 * vCC (r1). Useful for integer division and modulus. 5354 * 5355 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5356 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5357 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5358 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5359 */ 5360 /* binop/2addr vA, vB */ 5361 mov r3, rINST, lsr #12 @ r3<- B 5362 ubfx r9, rINST, #8, #4 @ r9<- A 5363 GET_VREG(r1, r3) @ r1<- vB 5364 GET_VREG(r0, r9) @ r0<- vA 5365 .if 1 5366 cmp r1, #0 @ is second operand zero? 5367 beq common_errDivideByZero 5368 .endif 5369 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5370 5371 @ optional op; may set condition codes 5372 bl __aeabi_idiv @ r0<- op, r0-r3 changed 5373 GET_INST_OPCODE(ip) @ extract opcode from rINST 5374 SET_VREG(r0, r9) @ vAA<- r0 5375 GOTO_OPCODE(ip) @ jump to next instruction 5376 /* 10-13 instructions */ 5377 5378 5379/* ------------------------------ */ 5380 .balign 64 5381.L_OP_REM_INT_2ADDR: /* 0xb4 */ 5382/* File: armv6t2/OP_REM_INT_2ADDR.S */ 5383/* idivmod returns quotient in r0 and remainder in r1 */ 5384/* File: armv6t2/binop2addr.S */ 5385 /* 5386 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5387 * that specifies an instruction that performs "result = r0 op r1". 5388 * This could be an ARM instruction or a function call. (If the result 5389 * comes back in a register other than r0, you can override "result".) 5390 * 5391 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5392 * vCC (r1). Useful for integer division and modulus. 5393 * 5394 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5395 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5396 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5397 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5398 */ 5399 /* binop/2addr vA, vB */ 5400 mov r3, rINST, lsr #12 @ r3<- B 5401 ubfx r9, rINST, #8, #4 @ r9<- A 5402 GET_VREG(r1, r3) @ r1<- vB 5403 GET_VREG(r0, r9) @ r0<- vA 5404 .if 1 5405 cmp r1, #0 @ is second operand zero? 5406 beq common_errDivideByZero 5407 .endif 5408 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5409 5410 @ optional op; may set condition codes 5411 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 5412 GET_INST_OPCODE(ip) @ extract opcode from rINST 5413 SET_VREG(r1, r9) @ vAA<- r1 5414 GOTO_OPCODE(ip) @ jump to next instruction 5415 /* 10-13 instructions */ 5416 5417 5418/* ------------------------------ */ 5419 .balign 64 5420.L_OP_AND_INT_2ADDR: /* 0xb5 */ 5421/* File: armv6t2/OP_AND_INT_2ADDR.S */ 5422/* File: armv6t2/binop2addr.S */ 5423 /* 5424 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5425 * that specifies an instruction that performs "result = r0 op r1". 5426 * This could be an ARM instruction or a function call. (If the result 5427 * comes back in a register other than r0, you can override "result".) 5428 * 5429 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5430 * vCC (r1). Useful for integer division and modulus. 5431 * 5432 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5433 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5434 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5435 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5436 */ 5437 /* binop/2addr vA, vB */ 5438 mov r3, rINST, lsr #12 @ r3<- B 5439 ubfx r9, rINST, #8, #4 @ r9<- A 5440 GET_VREG(r1, r3) @ r1<- vB 5441 GET_VREG(r0, r9) @ r0<- vA 5442 .if 0 5443 cmp r1, #0 @ is second operand zero? 5444 beq common_errDivideByZero 5445 .endif 5446 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5447 5448 @ optional op; may set condition codes 5449 and r0, r0, r1 @ r0<- op, r0-r3 changed 5450 GET_INST_OPCODE(ip) @ extract opcode from rINST 5451 SET_VREG(r0, r9) @ vAA<- r0 5452 GOTO_OPCODE(ip) @ jump to next instruction 5453 /* 10-13 instructions */ 5454 5455 5456/* ------------------------------ */ 5457 .balign 64 5458.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5459/* File: armv6t2/OP_OR_INT_2ADDR.S */ 5460/* File: armv6t2/binop2addr.S */ 5461 /* 5462 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5463 * that specifies an instruction that performs "result = r0 op r1". 5464 * This could be an ARM instruction or a function call. (If the result 5465 * comes back in a register other than r0, you can override "result".) 5466 * 5467 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5468 * vCC (r1). Useful for integer division and modulus. 5469 * 5470 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5471 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5472 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5473 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5474 */ 5475 /* binop/2addr vA, vB */ 5476 mov r3, rINST, lsr #12 @ r3<- B 5477 ubfx r9, rINST, #8, #4 @ r9<- A 5478 GET_VREG(r1, r3) @ r1<- vB 5479 GET_VREG(r0, r9) @ r0<- vA 5480 .if 0 5481 cmp r1, #0 @ is second operand zero? 5482 beq common_errDivideByZero 5483 .endif 5484 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5485 5486 @ optional op; may set condition codes 5487 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5488 GET_INST_OPCODE(ip) @ extract opcode from rINST 5489 SET_VREG(r0, r9) @ vAA<- r0 5490 GOTO_OPCODE(ip) @ jump to next instruction 5491 /* 10-13 instructions */ 5492 5493 5494/* ------------------------------ */ 5495 .balign 64 5496.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5497/* File: armv6t2/OP_XOR_INT_2ADDR.S */ 5498/* File: armv6t2/binop2addr.S */ 5499 /* 5500 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5501 * that specifies an instruction that performs "result = r0 op r1". 5502 * This could be an ARM instruction or a function call. (If the result 5503 * comes back in a register other than r0, you can override "result".) 5504 * 5505 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5506 * vCC (r1). Useful for integer division and modulus. 5507 * 5508 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5509 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5510 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5511 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5512 */ 5513 /* binop/2addr vA, vB */ 5514 mov r3, rINST, lsr #12 @ r3<- B 5515 ubfx r9, rINST, #8, #4 @ r9<- A 5516 GET_VREG(r1, r3) @ r1<- vB 5517 GET_VREG(r0, r9) @ r0<- vA 5518 .if 0 5519 cmp r1, #0 @ is second operand zero? 5520 beq common_errDivideByZero 5521 .endif 5522 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5523 5524 @ optional op; may set condition codes 5525 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5526 GET_INST_OPCODE(ip) @ extract opcode from rINST 5527 SET_VREG(r0, r9) @ vAA<- r0 5528 GOTO_OPCODE(ip) @ jump to next instruction 5529 /* 10-13 instructions */ 5530 5531 5532/* ------------------------------ */ 5533 .balign 64 5534.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5535/* File: armv6t2/OP_SHL_INT_2ADDR.S */ 5536/* File: armv6t2/binop2addr.S */ 5537 /* 5538 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5539 * that specifies an instruction that performs "result = r0 op r1". 5540 * This could be an ARM instruction or a function call. (If the result 5541 * comes back in a register other than r0, you can override "result".) 5542 * 5543 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5544 * vCC (r1). Useful for integer division and modulus. 5545 * 5546 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5547 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5548 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5549 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5550 */ 5551 /* binop/2addr vA, vB */ 5552 mov r3, rINST, lsr #12 @ r3<- B 5553 ubfx r9, rINST, #8, #4 @ r9<- A 5554 GET_VREG(r1, r3) @ r1<- vB 5555 GET_VREG(r0, r9) @ r0<- vA 5556 .if 0 5557 cmp r1, #0 @ is second operand zero? 5558 beq common_errDivideByZero 5559 .endif 5560 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5561 5562 and r1, r1, #31 @ optional op; may set condition codes 5563 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5564 GET_INST_OPCODE(ip) @ extract opcode from rINST 5565 SET_VREG(r0, r9) @ vAA<- r0 5566 GOTO_OPCODE(ip) @ jump to next instruction 5567 /* 10-13 instructions */ 5568 5569 5570/* ------------------------------ */ 5571 .balign 64 5572.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5573/* File: armv6t2/OP_SHR_INT_2ADDR.S */ 5574/* File: armv6t2/binop2addr.S */ 5575 /* 5576 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5577 * that specifies an instruction that performs "result = r0 op r1". 5578 * This could be an ARM instruction or a function call. (If the result 5579 * comes back in a register other than r0, you can override "result".) 5580 * 5581 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5582 * vCC (r1). Useful for integer division and modulus. 5583 * 5584 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5585 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5586 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5587 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5588 */ 5589 /* binop/2addr vA, vB */ 5590 mov r3, rINST, lsr #12 @ r3<- B 5591 ubfx r9, rINST, #8, #4 @ r9<- A 5592 GET_VREG(r1, r3) @ r1<- vB 5593 GET_VREG(r0, r9) @ r0<- vA 5594 .if 0 5595 cmp r1, #0 @ is second operand zero? 5596 beq common_errDivideByZero 5597 .endif 5598 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5599 5600 and r1, r1, #31 @ optional op; may set condition codes 5601 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5602 GET_INST_OPCODE(ip) @ extract opcode from rINST 5603 SET_VREG(r0, r9) @ vAA<- r0 5604 GOTO_OPCODE(ip) @ jump to next instruction 5605 /* 10-13 instructions */ 5606 5607 5608/* ------------------------------ */ 5609 .balign 64 5610.L_OP_USHR_INT_2ADDR: /* 0xba */ 5611/* File: armv6t2/OP_USHR_INT_2ADDR.S */ 5612/* File: armv6t2/binop2addr.S */ 5613 /* 5614 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5615 * that specifies an instruction that performs "result = r0 op r1". 5616 * This could be an ARM instruction or a function call. (If the result 5617 * comes back in a register other than r0, you can override "result".) 5618 * 5619 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5620 * vCC (r1). Useful for integer division and modulus. 5621 * 5622 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5623 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5624 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5625 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5626 */ 5627 /* binop/2addr vA, vB */ 5628 mov r3, rINST, lsr #12 @ r3<- B 5629 ubfx r9, rINST, #8, #4 @ r9<- A 5630 GET_VREG(r1, r3) @ r1<- vB 5631 GET_VREG(r0, r9) @ r0<- vA 5632 .if 0 5633 cmp r1, #0 @ is second operand zero? 5634 beq common_errDivideByZero 5635 .endif 5636 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5637 5638 and r1, r1, #31 @ optional op; may set condition codes 5639 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5640 GET_INST_OPCODE(ip) @ extract opcode from rINST 5641 SET_VREG(r0, r9) @ vAA<- r0 5642 GOTO_OPCODE(ip) @ jump to next instruction 5643 /* 10-13 instructions */ 5644 5645 5646/* ------------------------------ */ 5647 .balign 64 5648.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 5649/* File: armv6t2/OP_ADD_LONG_2ADDR.S */ 5650/* File: armv6t2/binopWide2addr.S */ 5651 /* 5652 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5653 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5654 * This could be an ARM instruction or a function call. (If the result 5655 * comes back in a register other than r0, you can override "result".) 5656 * 5657 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5658 * vCC (r1). Useful for integer division and modulus. 5659 * 5660 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5661 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5662 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5663 * rem-double/2addr 5664 */ 5665 /* binop/2addr vA, vB */ 5666 mov r1, rINST, lsr #12 @ r1<- B 5667 ubfx r9, rINST, #8, #4 @ r9<- A 5668 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5669 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5670 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5671 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5672 .if 0 5673 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5674 beq common_errDivideByZero 5675 .endif 5676 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5677 5678 adds r0, r0, r2 @ optional op; may set condition codes 5679 adc r1, r1, r3 @ result<- op, r0-r3 changed 5680 GET_INST_OPCODE(ip) @ extract opcode from rINST 5681 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5682 GOTO_OPCODE(ip) @ jump to next instruction 5683 /* 12-15 instructions */ 5684 5685 5686/* ------------------------------ */ 5687 .balign 64 5688.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 5689/* File: armv6t2/OP_SUB_LONG_2ADDR.S */ 5690/* File: armv6t2/binopWide2addr.S */ 5691 /* 5692 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5693 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5694 * This could be an ARM instruction or a function call. (If the result 5695 * comes back in a register other than r0, you can override "result".) 5696 * 5697 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5698 * vCC (r1). Useful for integer division and modulus. 5699 * 5700 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5701 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5702 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5703 * rem-double/2addr 5704 */ 5705 /* binop/2addr vA, vB */ 5706 mov r1, rINST, lsr #12 @ r1<- B 5707 ubfx r9, rINST, #8, #4 @ r9<- A 5708 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5709 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5710 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5711 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5712 .if 0 5713 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5714 beq common_errDivideByZero 5715 .endif 5716 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5717 5718 subs r0, r0, r2 @ optional op; may set condition codes 5719 sbc r1, r1, r3 @ result<- op, r0-r3 changed 5720 GET_INST_OPCODE(ip) @ extract opcode from rINST 5721 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5722 GOTO_OPCODE(ip) @ jump to next instruction 5723 /* 12-15 instructions */ 5724 5725 5726/* ------------------------------ */ 5727 .balign 64 5728.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 5729/* File: armv6t2/OP_MUL_LONG_2ADDR.S */ 5730 /* 5731 * Signed 64-bit integer multiply, "/2addr" version. 5732 * 5733 * See OP_MUL_LONG for an explanation. 5734 * 5735 * We get a little tight on registers, so to avoid looking up &fp[A] 5736 * again we stuff it into rINST. 5737 */ 5738 /* mul-long/2addr vA, vB */ 5739 mov r1, rINST, lsr #12 @ r1<- B 5740 ubfx r9, rINST, #8, #4 @ r9<- A 5741 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5742 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 5743 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5744 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 5745 mul ip, r2, r1 @ ip<- ZxW 5746 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 5747 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 5748 mov r0, rINST @ r0<- &fp[A] (free up rINST) 5749 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5750 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 5751 GET_INST_OPCODE(ip) @ extract opcode from rINST 5752 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 5753 GOTO_OPCODE(ip) @ jump to next instruction 5754 5755/* ------------------------------ */ 5756 .balign 64 5757.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 5758/* File: armv6t2/OP_DIV_LONG_2ADDR.S */ 5759/* File: armv6t2/binopWide2addr.S */ 5760 /* 5761 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5762 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5763 * This could be an ARM instruction or a function call. (If the result 5764 * comes back in a register other than r0, you can override "result".) 5765 * 5766 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5767 * vCC (r1). Useful for integer division and modulus. 5768 * 5769 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5770 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5771 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5772 * rem-double/2addr 5773 */ 5774 /* binop/2addr vA, vB */ 5775 mov r1, rINST, lsr #12 @ r1<- B 5776 ubfx r9, rINST, #8, #4 @ r9<- A 5777 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5778 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5779 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5780 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5781 .if 1 5782 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5783 beq common_errDivideByZero 5784 .endif 5785 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5786 5787 @ optional op; may set condition codes 5788 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5789 GET_INST_OPCODE(ip) @ extract opcode from rINST 5790 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5791 GOTO_OPCODE(ip) @ jump to next instruction 5792 /* 12-15 instructions */ 5793 5794 5795/* ------------------------------ */ 5796 .balign 64 5797.L_OP_REM_LONG_2ADDR: /* 0xbf */ 5798/* File: armv6t2/OP_REM_LONG_2ADDR.S */ 5799/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 5800/* File: armv6t2/binopWide2addr.S */ 5801 /* 5802 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5803 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5804 * This could be an ARM instruction or a function call. (If the result 5805 * comes back in a register other than r0, you can override "result".) 5806 * 5807 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5808 * vCC (r1). Useful for integer division and modulus. 5809 * 5810 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5811 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5812 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5813 * rem-double/2addr 5814 */ 5815 /* binop/2addr vA, vB */ 5816 mov r1, rINST, lsr #12 @ r1<- B 5817 ubfx r9, rINST, #8, #4 @ r9<- A 5818 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5819 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5820 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5821 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5822 .if 1 5823 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5824 beq common_errDivideByZero 5825 .endif 5826 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5827 5828 @ optional op; may set condition codes 5829 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5830 GET_INST_OPCODE(ip) @ extract opcode from rINST 5831 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 5832 GOTO_OPCODE(ip) @ jump to next instruction 5833 /* 12-15 instructions */ 5834 5835 5836/* ------------------------------ */ 5837 .balign 64 5838.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 5839/* File: armv6t2/OP_AND_LONG_2ADDR.S */ 5840/* File: armv6t2/binopWide2addr.S */ 5841 /* 5842 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5843 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5844 * This could be an ARM instruction or a function call. (If the result 5845 * comes back in a register other than r0, you can override "result".) 5846 * 5847 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5848 * vCC (r1). Useful for integer division and modulus. 5849 * 5850 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5851 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5852 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5853 * rem-double/2addr 5854 */ 5855 /* binop/2addr vA, vB */ 5856 mov r1, rINST, lsr #12 @ r1<- B 5857 ubfx r9, rINST, #8, #4 @ r9<- A 5858 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5859 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5860 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5861 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5862 .if 0 5863 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5864 beq common_errDivideByZero 5865 .endif 5866 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5867 5868 and r0, r0, r2 @ optional op; may set condition codes 5869 and r1, r1, r3 @ result<- op, r0-r3 changed 5870 GET_INST_OPCODE(ip) @ extract opcode from rINST 5871 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5872 GOTO_OPCODE(ip) @ jump to next instruction 5873 /* 12-15 instructions */ 5874 5875 5876/* ------------------------------ */ 5877 .balign 64 5878.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 5879/* File: armv6t2/OP_OR_LONG_2ADDR.S */ 5880/* File: armv6t2/binopWide2addr.S */ 5881 /* 5882 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5883 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5884 * This could be an ARM instruction or a function call. (If the result 5885 * comes back in a register other than r0, you can override "result".) 5886 * 5887 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5888 * vCC (r1). Useful for integer division and modulus. 5889 * 5890 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5891 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5892 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5893 * rem-double/2addr 5894 */ 5895 /* binop/2addr vA, vB */ 5896 mov r1, rINST, lsr #12 @ r1<- B 5897 ubfx r9, rINST, #8, #4 @ r9<- A 5898 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5899 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5900 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5901 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5902 .if 0 5903 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5904 beq common_errDivideByZero 5905 .endif 5906 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5907 5908 orr r0, r0, r2 @ optional op; may set condition codes 5909 orr r1, r1, r3 @ result<- op, r0-r3 changed 5910 GET_INST_OPCODE(ip) @ extract opcode from rINST 5911 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5912 GOTO_OPCODE(ip) @ jump to next instruction 5913 /* 12-15 instructions */ 5914 5915 5916/* ------------------------------ */ 5917 .balign 64 5918.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 5919/* File: armv6t2/OP_XOR_LONG_2ADDR.S */ 5920/* File: armv6t2/binopWide2addr.S */ 5921 /* 5922 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5923 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5924 * This could be an ARM instruction or a function call. (If the result 5925 * comes back in a register other than r0, you can override "result".) 5926 * 5927 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5928 * vCC (r1). Useful for integer division and modulus. 5929 * 5930 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5931 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5932 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5933 * rem-double/2addr 5934 */ 5935 /* binop/2addr vA, vB */ 5936 mov r1, rINST, lsr #12 @ r1<- B 5937 ubfx r9, rINST, #8, #4 @ r9<- A 5938 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5939 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5940 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5941 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5942 .if 0 5943 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5944 beq common_errDivideByZero 5945 .endif 5946 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5947 5948 eor r0, r0, r2 @ optional op; may set condition codes 5949 eor r1, r1, r3 @ result<- op, r0-r3 changed 5950 GET_INST_OPCODE(ip) @ extract opcode from rINST 5951 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5952 GOTO_OPCODE(ip) @ jump to next instruction 5953 /* 12-15 instructions */ 5954 5955 5956/* ------------------------------ */ 5957 .balign 64 5958.L_OP_SHL_LONG_2ADDR: /* 0xc3 */ 5959/* File: armv6t2/OP_SHL_LONG_2ADDR.S */ 5960 /* 5961 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 5962 * 32-bit shift distance. 5963 */ 5964 /* shl-long/2addr vA, vB */ 5965 mov r3, rINST, lsr #12 @ r3<- B 5966 ubfx r9, rINST, #8, #4 @ r9<- A 5967 GET_VREG(r2, r3) @ r2<- vB 5968 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5969 and r2, r2, #63 @ r2<- r2 & 0x3f 5970 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5971 5972 mov r1, r1, asl r2 @ r1<- r1 << r2 5973 rsb r3, r2, #32 @ r3<- 32 - r2 5974 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 5975 subs ip, r2, #32 @ ip<- r2 - 32 5976 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5977 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 5978 mov r0, r0, asl r2 @ r0<- r0 << r2 5979 b .LOP_SHL_LONG_2ADDR_finish 5980 5981/* ------------------------------ */ 5982 .balign 64 5983.L_OP_SHR_LONG_2ADDR: /* 0xc4 */ 5984/* File: armv6t2/OP_SHR_LONG_2ADDR.S */ 5985 /* 5986 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 5987 * 32-bit shift distance. 5988 */ 5989 /* shr-long/2addr vA, vB */ 5990 mov r3, rINST, lsr #12 @ r3<- B 5991 ubfx r9, rINST, #8, #4 @ r9<- A 5992 GET_VREG(r2, r3) @ r2<- vB 5993 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5994 and r2, r2, #63 @ r2<- r2 & 0x3f 5995 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5996 5997 mov r0, r0, lsr r2 @ r0<- r2 >> r2 5998 rsb r3, r2, #32 @ r3<- 32 - r2 5999 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6000 subs ip, r2, #32 @ ip<- r2 - 32 6001 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6002 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 6003 mov r1, r1, asr r2 @ r1<- r1 >> r2 6004 b .LOP_SHR_LONG_2ADDR_finish 6005 6006/* ------------------------------ */ 6007 .balign 64 6008.L_OP_USHR_LONG_2ADDR: /* 0xc5 */ 6009/* File: armv6t2/OP_USHR_LONG_2ADDR.S */ 6010 /* 6011 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6012 * 32-bit shift distance. 6013 */ 6014 /* ushr-long/2addr vA, vB */ 6015 mov r3, rINST, lsr #12 @ r3<- B 6016 ubfx r9, rINST, #8, #4 @ r9<- A 6017 GET_VREG(r2, r3) @ r2<- vB 6018 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6019 and r2, r2, #63 @ r2<- r2 & 0x3f 6020 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6021 6022 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6023 rsb r3, r2, #32 @ r3<- 32 - r2 6024 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6025 subs ip, r2, #32 @ ip<- r2 - 32 6026 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6027 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 6028 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 6029 b .LOP_USHR_LONG_2ADDR_finish 6030 6031/* ------------------------------ */ 6032 .balign 64 6033.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */ 6034/* File: arm-vfp/OP_ADD_FLOAT_2ADDR.S */ 6035/* File: arm-vfp/fbinop2addr.S */ 6036 /* 6037 * Generic 32-bit floating point "/2addr" binary operation. Provide 6038 * an "instr" line that specifies an instruction that performs 6039 * "s2 = s0 op s1". 6040 * 6041 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6042 */ 6043 /* binop/2addr vA, vB */ 6044 mov r3, rINST, lsr #12 @ r3<- B 6045 mov r9, rINST, lsr #8 @ r9<- A+ 6046 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6047 and r9, r9, #15 @ r9<- A 6048 flds s1, [r3] @ s1<- vB 6049 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6050 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6051 flds s0, [r9] @ s0<- vA 6052 6053 fadds s2, s0, s1 @ s2<- op 6054 GET_INST_OPCODE(ip) @ extract opcode from rINST 6055 fsts s2, [r9] @ vAA<- s2 6056 GOTO_OPCODE(ip) @ jump to next instruction 6057 6058 6059/* ------------------------------ */ 6060 .balign 64 6061.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */ 6062/* File: arm-vfp/OP_SUB_FLOAT_2ADDR.S */ 6063/* File: arm-vfp/fbinop2addr.S */ 6064 /* 6065 * Generic 32-bit floating point "/2addr" binary operation. Provide 6066 * an "instr" line that specifies an instruction that performs 6067 * "s2 = s0 op s1". 6068 * 6069 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6070 */ 6071 /* binop/2addr vA, vB */ 6072 mov r3, rINST, lsr #12 @ r3<- B 6073 mov r9, rINST, lsr #8 @ r9<- A+ 6074 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6075 and r9, r9, #15 @ r9<- A 6076 flds s1, [r3] @ s1<- vB 6077 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6078 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6079 flds s0, [r9] @ s0<- vA 6080 6081 fsubs s2, s0, s1 @ s2<- op 6082 GET_INST_OPCODE(ip) @ extract opcode from rINST 6083 fsts s2, [r9] @ vAA<- s2 6084 GOTO_OPCODE(ip) @ jump to next instruction 6085 6086 6087/* ------------------------------ */ 6088 .balign 64 6089.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */ 6090/* File: arm-vfp/OP_MUL_FLOAT_2ADDR.S */ 6091/* File: arm-vfp/fbinop2addr.S */ 6092 /* 6093 * Generic 32-bit floating point "/2addr" binary operation. Provide 6094 * an "instr" line that specifies an instruction that performs 6095 * "s2 = s0 op s1". 6096 * 6097 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6098 */ 6099 /* binop/2addr vA, vB */ 6100 mov r3, rINST, lsr #12 @ r3<- B 6101 mov r9, rINST, lsr #8 @ r9<- A+ 6102 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6103 and r9, r9, #15 @ r9<- A 6104 flds s1, [r3] @ s1<- vB 6105 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6106 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6107 flds s0, [r9] @ s0<- vA 6108 6109 fmuls s2, s0, s1 @ s2<- op 6110 GET_INST_OPCODE(ip) @ extract opcode from rINST 6111 fsts s2, [r9] @ vAA<- s2 6112 GOTO_OPCODE(ip) @ jump to next instruction 6113 6114 6115/* ------------------------------ */ 6116 .balign 64 6117.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */ 6118/* File: arm-vfp/OP_DIV_FLOAT_2ADDR.S */ 6119/* File: arm-vfp/fbinop2addr.S */ 6120 /* 6121 * Generic 32-bit floating point "/2addr" binary operation. Provide 6122 * an "instr" line that specifies an instruction that performs 6123 * "s2 = s0 op s1". 6124 * 6125 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6126 */ 6127 /* binop/2addr vA, vB */ 6128 mov r3, rINST, lsr #12 @ r3<- B 6129 mov r9, rINST, lsr #8 @ r9<- A+ 6130 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6131 and r9, r9, #15 @ r9<- A 6132 flds s1, [r3] @ s1<- vB 6133 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6134 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6135 flds s0, [r9] @ s0<- vA 6136 6137 fdivs s2, s0, s1 @ s2<- op 6138 GET_INST_OPCODE(ip) @ extract opcode from rINST 6139 fsts s2, [r9] @ vAA<- s2 6140 GOTO_OPCODE(ip) @ jump to next instruction 6141 6142 6143/* ------------------------------ */ 6144 .balign 64 6145.L_OP_REM_FLOAT_2ADDR: /* 0xca */ 6146/* File: armv6t2/OP_REM_FLOAT_2ADDR.S */ 6147/* EABI doesn't define a float remainder function, but libm does */ 6148/* File: armv6t2/binop2addr.S */ 6149 /* 6150 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6151 * that specifies an instruction that performs "result = r0 op r1". 6152 * This could be an ARM instruction or a function call. (If the result 6153 * comes back in a register other than r0, you can override "result".) 6154 * 6155 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6156 * vCC (r1). Useful for integer division and modulus. 6157 * 6158 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6159 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6160 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6161 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6162 */ 6163 /* binop/2addr vA, vB */ 6164 mov r3, rINST, lsr #12 @ r3<- B 6165 ubfx r9, rINST, #8, #4 @ r9<- A 6166 GET_VREG(r1, r3) @ r1<- vB 6167 GET_VREG(r0, r9) @ r0<- vA 6168 .if 0 6169 cmp r1, #0 @ is second operand zero? 6170 beq common_errDivideByZero 6171 .endif 6172 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6173 6174 @ optional op; may set condition codes 6175 bl fmodf @ r0<- op, r0-r3 changed 6176 GET_INST_OPCODE(ip) @ extract opcode from rINST 6177 SET_VREG(r0, r9) @ vAA<- r0 6178 GOTO_OPCODE(ip) @ jump to next instruction 6179 /* 10-13 instructions */ 6180 6181 6182/* ------------------------------ */ 6183 .balign 64 6184.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */ 6185/* File: arm-vfp/OP_ADD_DOUBLE_2ADDR.S */ 6186/* File: arm-vfp/fbinopWide2addr.S */ 6187 /* 6188 * Generic 64-bit floating point "/2addr" binary operation. Provide 6189 * an "instr" line that specifies an instruction that performs 6190 * "d2 = d0 op d1". 6191 * 6192 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6193 * div-double/2addr 6194 */ 6195 /* binop/2addr vA, vB */ 6196 mov r3, rINST, lsr #12 @ r3<- B 6197 mov r9, rINST, lsr #8 @ r9<- A+ 6198 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6199 and r9, r9, #15 @ r9<- A 6200 fldd d1, [r3] @ d1<- vB 6201 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6202 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6203 fldd d0, [r9] @ d0<- vA 6204 6205 faddd d2, d0, d1 @ d2<- op 6206 GET_INST_OPCODE(ip) @ extract opcode from rINST 6207 fstd d2, [r9] @ vAA<- d2 6208 GOTO_OPCODE(ip) @ jump to next instruction 6209 6210 6211/* ------------------------------ */ 6212 .balign 64 6213.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */ 6214/* File: arm-vfp/OP_SUB_DOUBLE_2ADDR.S */ 6215/* File: arm-vfp/fbinopWide2addr.S */ 6216 /* 6217 * Generic 64-bit floating point "/2addr" binary operation. Provide 6218 * an "instr" line that specifies an instruction that performs 6219 * "d2 = d0 op d1". 6220 * 6221 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6222 * div-double/2addr 6223 */ 6224 /* binop/2addr vA, vB */ 6225 mov r3, rINST, lsr #12 @ r3<- B 6226 mov r9, rINST, lsr #8 @ r9<- A+ 6227 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6228 and r9, r9, #15 @ r9<- A 6229 fldd d1, [r3] @ d1<- vB 6230 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6231 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6232 fldd d0, [r9] @ d0<- vA 6233 6234 fsubd d2, d0, d1 @ d2<- op 6235 GET_INST_OPCODE(ip) @ extract opcode from rINST 6236 fstd d2, [r9] @ vAA<- d2 6237 GOTO_OPCODE(ip) @ jump to next instruction 6238 6239 6240/* ------------------------------ */ 6241 .balign 64 6242.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */ 6243/* File: arm-vfp/OP_MUL_DOUBLE_2ADDR.S */ 6244/* File: arm-vfp/fbinopWide2addr.S */ 6245 /* 6246 * Generic 64-bit floating point "/2addr" binary operation. Provide 6247 * an "instr" line that specifies an instruction that performs 6248 * "d2 = d0 op d1". 6249 * 6250 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6251 * div-double/2addr 6252 */ 6253 /* binop/2addr vA, vB */ 6254 mov r3, rINST, lsr #12 @ r3<- B 6255 mov r9, rINST, lsr #8 @ r9<- A+ 6256 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6257 and r9, r9, #15 @ r9<- A 6258 fldd d1, [r3] @ d1<- vB 6259 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6260 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6261 fldd d0, [r9] @ d0<- vA 6262 6263 fmuld d2, d0, d1 @ d2<- op 6264 GET_INST_OPCODE(ip) @ extract opcode from rINST 6265 fstd d2, [r9] @ vAA<- d2 6266 GOTO_OPCODE(ip) @ jump to next instruction 6267 6268 6269/* ------------------------------ */ 6270 .balign 64 6271.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */ 6272/* File: arm-vfp/OP_DIV_DOUBLE_2ADDR.S */ 6273/* File: arm-vfp/fbinopWide2addr.S */ 6274 /* 6275 * Generic 64-bit floating point "/2addr" binary operation. Provide 6276 * an "instr" line that specifies an instruction that performs 6277 * "d2 = d0 op d1". 6278 * 6279 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6280 * div-double/2addr 6281 */ 6282 /* binop/2addr vA, vB */ 6283 mov r3, rINST, lsr #12 @ r3<- B 6284 mov r9, rINST, lsr #8 @ r9<- A+ 6285 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6286 and r9, r9, #15 @ r9<- A 6287 fldd d1, [r3] @ d1<- vB 6288 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6289 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6290 fldd d0, [r9] @ d0<- vA 6291 6292 fdivd d2, d0, d1 @ d2<- op 6293 GET_INST_OPCODE(ip) @ extract opcode from rINST 6294 fstd d2, [r9] @ vAA<- d2 6295 GOTO_OPCODE(ip) @ jump to next instruction 6296 6297 6298/* ------------------------------ */ 6299 .balign 64 6300.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */ 6301/* File: armv6t2/OP_REM_DOUBLE_2ADDR.S */ 6302/* EABI doesn't define a double remainder function, but libm does */ 6303/* File: armv6t2/binopWide2addr.S */ 6304 /* 6305 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6306 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6307 * This could be an ARM instruction or a function call. (If the result 6308 * comes back in a register other than r0, you can override "result".) 6309 * 6310 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6311 * vCC (r1). Useful for integer division and modulus. 6312 * 6313 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6314 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6315 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6316 * rem-double/2addr 6317 */ 6318 /* binop/2addr vA, vB */ 6319 mov r1, rINST, lsr #12 @ r1<- B 6320 ubfx r9, rINST, #8, #4 @ r9<- A 6321 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6322 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6323 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6324 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6325 .if 0 6326 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6327 beq common_errDivideByZero 6328 .endif 6329 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6330 6331 @ optional op; may set condition codes 6332 bl fmod @ result<- op, r0-r3 changed 6333 GET_INST_OPCODE(ip) @ extract opcode from rINST 6334 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6335 GOTO_OPCODE(ip) @ jump to next instruction 6336 /* 12-15 instructions */ 6337 6338 6339/* ------------------------------ */ 6340 .balign 64 6341.L_OP_ADD_INT_LIT16: /* 0xd0 */ 6342/* File: armv6t2/OP_ADD_INT_LIT16.S */ 6343/* File: armv6t2/binopLit16.S */ 6344 /* 6345 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6346 * that specifies an instruction that performs "result = r0 op r1". 6347 * This could be an ARM instruction or a function call. (If the result 6348 * comes back in a register other than r0, you can override "result".) 6349 * 6350 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6351 * vCC (r1). Useful for integer division and modulus. 6352 * 6353 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6354 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6355 */ 6356 /* binop/lit16 vA, vB, #+CCCC */ 6357 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6358 mov r2, rINST, lsr #12 @ r2<- B 6359 ubfx r9, rINST, #8, #4 @ r9<- A 6360 GET_VREG(r0, r2) @ r0<- vB 6361 .if 0 6362 cmp r1, #0 @ is second operand zero? 6363 beq common_errDivideByZero 6364 .endif 6365 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6366 6367 add r0, r0, r1 @ r0<- op, r0-r3 changed 6368 GET_INST_OPCODE(ip) @ extract opcode from rINST 6369 SET_VREG(r0, r9) @ vAA<- r0 6370 GOTO_OPCODE(ip) @ jump to next instruction 6371 /* 10-13 instructions */ 6372 6373 6374/* ------------------------------ */ 6375 .balign 64 6376.L_OP_RSUB_INT: /* 0xd1 */ 6377/* File: armv6t2/OP_RSUB_INT.S */ 6378/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */ 6379/* File: armv6t2/binopLit16.S */ 6380 /* 6381 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6382 * that specifies an instruction that performs "result = r0 op r1". 6383 * This could be an ARM instruction or a function call. (If the result 6384 * comes back in a register other than r0, you can override "result".) 6385 * 6386 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6387 * vCC (r1). Useful for integer division and modulus. 6388 * 6389 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6390 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6391 */ 6392 /* binop/lit16 vA, vB, #+CCCC */ 6393 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6394 mov r2, rINST, lsr #12 @ r2<- B 6395 ubfx r9, rINST, #8, #4 @ r9<- A 6396 GET_VREG(r0, r2) @ r0<- vB 6397 .if 0 6398 cmp r1, #0 @ is second operand zero? 6399 beq common_errDivideByZero 6400 .endif 6401 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6402 6403 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6404 GET_INST_OPCODE(ip) @ extract opcode from rINST 6405 SET_VREG(r0, r9) @ vAA<- r0 6406 GOTO_OPCODE(ip) @ jump to next instruction 6407 /* 10-13 instructions */ 6408 6409 6410/* ------------------------------ */ 6411 .balign 64 6412.L_OP_MUL_INT_LIT16: /* 0xd2 */ 6413/* File: armv6t2/OP_MUL_INT_LIT16.S */ 6414/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6415/* File: armv6t2/binopLit16.S */ 6416 /* 6417 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6418 * that specifies an instruction that performs "result = r0 op r1". 6419 * This could be an ARM instruction or a function call. (If the result 6420 * comes back in a register other than r0, you can override "result".) 6421 * 6422 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6423 * vCC (r1). Useful for integer division and modulus. 6424 * 6425 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6426 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6427 */ 6428 /* binop/lit16 vA, vB, #+CCCC */ 6429 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6430 mov r2, rINST, lsr #12 @ r2<- B 6431 ubfx r9, rINST, #8, #4 @ r9<- A 6432 GET_VREG(r0, r2) @ r0<- vB 6433 .if 0 6434 cmp r1, #0 @ is second operand zero? 6435 beq common_errDivideByZero 6436 .endif 6437 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6438 6439 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6440 GET_INST_OPCODE(ip) @ extract opcode from rINST 6441 SET_VREG(r0, r9) @ vAA<- r0 6442 GOTO_OPCODE(ip) @ jump to next instruction 6443 /* 10-13 instructions */ 6444 6445 6446/* ------------------------------ */ 6447 .balign 64 6448.L_OP_DIV_INT_LIT16: /* 0xd3 */ 6449/* File: armv6t2/OP_DIV_INT_LIT16.S */ 6450/* File: armv6t2/binopLit16.S */ 6451 /* 6452 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6453 * that specifies an instruction that performs "result = r0 op r1". 6454 * This could be an ARM instruction or a function call. (If the result 6455 * comes back in a register other than r0, you can override "result".) 6456 * 6457 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6458 * vCC (r1). Useful for integer division and modulus. 6459 * 6460 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6461 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6462 */ 6463 /* binop/lit16 vA, vB, #+CCCC */ 6464 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6465 mov r2, rINST, lsr #12 @ r2<- B 6466 ubfx r9, rINST, #8, #4 @ r9<- A 6467 GET_VREG(r0, r2) @ r0<- vB 6468 .if 1 6469 cmp r1, #0 @ is second operand zero? 6470 beq common_errDivideByZero 6471 .endif 6472 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6473 6474 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6475 GET_INST_OPCODE(ip) @ extract opcode from rINST 6476 SET_VREG(r0, r9) @ vAA<- r0 6477 GOTO_OPCODE(ip) @ jump to next instruction 6478 /* 10-13 instructions */ 6479 6480 6481/* ------------------------------ */ 6482 .balign 64 6483.L_OP_REM_INT_LIT16: /* 0xd4 */ 6484/* File: armv6t2/OP_REM_INT_LIT16.S */ 6485/* idivmod returns quotient in r0 and remainder in r1 */ 6486/* File: armv6t2/binopLit16.S */ 6487 /* 6488 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6489 * that specifies an instruction that performs "result = r0 op r1". 6490 * This could be an ARM instruction or a function call. (If the result 6491 * comes back in a register other than r0, you can override "result".) 6492 * 6493 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6494 * vCC (r1). Useful for integer division and modulus. 6495 * 6496 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6497 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6498 */ 6499 /* binop/lit16 vA, vB, #+CCCC */ 6500 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6501 mov r2, rINST, lsr #12 @ r2<- B 6502 ubfx r9, rINST, #8, #4 @ r9<- A 6503 GET_VREG(r0, r2) @ r0<- vB 6504 .if 1 6505 cmp r1, #0 @ is second operand zero? 6506 beq common_errDivideByZero 6507 .endif 6508 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6509 6510 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6511 GET_INST_OPCODE(ip) @ extract opcode from rINST 6512 SET_VREG(r1, r9) @ vAA<- r1 6513 GOTO_OPCODE(ip) @ jump to next instruction 6514 /* 10-13 instructions */ 6515 6516 6517/* ------------------------------ */ 6518 .balign 64 6519.L_OP_AND_INT_LIT16: /* 0xd5 */ 6520/* File: armv6t2/OP_AND_INT_LIT16.S */ 6521/* File: armv6t2/binopLit16.S */ 6522 /* 6523 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6524 * that specifies an instruction that performs "result = r0 op r1". 6525 * This could be an ARM instruction or a function call. (If the result 6526 * comes back in a register other than r0, you can override "result".) 6527 * 6528 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6529 * vCC (r1). Useful for integer division and modulus. 6530 * 6531 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6532 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6533 */ 6534 /* binop/lit16 vA, vB, #+CCCC */ 6535 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6536 mov r2, rINST, lsr #12 @ r2<- B 6537 ubfx r9, rINST, #8, #4 @ r9<- A 6538 GET_VREG(r0, r2) @ r0<- vB 6539 .if 0 6540 cmp r1, #0 @ is second operand zero? 6541 beq common_errDivideByZero 6542 .endif 6543 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6544 6545 and r0, r0, r1 @ r0<- op, r0-r3 changed 6546 GET_INST_OPCODE(ip) @ extract opcode from rINST 6547 SET_VREG(r0, r9) @ vAA<- r0 6548 GOTO_OPCODE(ip) @ jump to next instruction 6549 /* 10-13 instructions */ 6550 6551 6552/* ------------------------------ */ 6553 .balign 64 6554.L_OP_OR_INT_LIT16: /* 0xd6 */ 6555/* File: armv6t2/OP_OR_INT_LIT16.S */ 6556/* File: armv6t2/binopLit16.S */ 6557 /* 6558 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6559 * that specifies an instruction that performs "result = r0 op r1". 6560 * This could be an ARM instruction or a function call. (If the result 6561 * comes back in a register other than r0, you can override "result".) 6562 * 6563 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6564 * vCC (r1). Useful for integer division and modulus. 6565 * 6566 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6567 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6568 */ 6569 /* binop/lit16 vA, vB, #+CCCC */ 6570 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6571 mov r2, rINST, lsr #12 @ r2<- B 6572 ubfx r9, rINST, #8, #4 @ r9<- A 6573 GET_VREG(r0, r2) @ r0<- vB 6574 .if 0 6575 cmp r1, #0 @ is second operand zero? 6576 beq common_errDivideByZero 6577 .endif 6578 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6579 6580 orr r0, r0, r1 @ r0<- op, r0-r3 changed 6581 GET_INST_OPCODE(ip) @ extract opcode from rINST 6582 SET_VREG(r0, r9) @ vAA<- r0 6583 GOTO_OPCODE(ip) @ jump to next instruction 6584 /* 10-13 instructions */ 6585 6586 6587/* ------------------------------ */ 6588 .balign 64 6589.L_OP_XOR_INT_LIT16: /* 0xd7 */ 6590/* File: armv6t2/OP_XOR_INT_LIT16.S */ 6591/* File: armv6t2/binopLit16.S */ 6592 /* 6593 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6594 * that specifies an instruction that performs "result = r0 op r1". 6595 * This could be an ARM instruction or a function call. (If the result 6596 * comes back in a register other than r0, you can override "result".) 6597 * 6598 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6599 * vCC (r1). Useful for integer division and modulus. 6600 * 6601 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6602 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6603 */ 6604 /* binop/lit16 vA, vB, #+CCCC */ 6605 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6606 mov r2, rINST, lsr #12 @ r2<- B 6607 ubfx r9, rINST, #8, #4 @ r9<- A 6608 GET_VREG(r0, r2) @ r0<- vB 6609 .if 0 6610 cmp r1, #0 @ is second operand zero? 6611 beq common_errDivideByZero 6612 .endif 6613 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6614 6615 eor r0, r0, r1 @ r0<- op, r0-r3 changed 6616 GET_INST_OPCODE(ip) @ extract opcode from rINST 6617 SET_VREG(r0, r9) @ vAA<- r0 6618 GOTO_OPCODE(ip) @ jump to next instruction 6619 /* 10-13 instructions */ 6620 6621 6622/* ------------------------------ */ 6623 .balign 64 6624.L_OP_ADD_INT_LIT8: /* 0xd8 */ 6625/* File: armv5te/OP_ADD_INT_LIT8.S */ 6626/* File: armv5te/binopLit8.S */ 6627 /* 6628 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6629 * that specifies an instruction that performs "result = r0 op r1". 6630 * This could be an ARM instruction or a function call. (If the result 6631 * comes back in a register other than r0, you can override "result".) 6632 * 6633 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6634 * vCC (r1). Useful for integer division and modulus. 6635 * 6636 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6637 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6638 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6639 */ 6640 /* binop/lit8 vAA, vBB, #+CC */ 6641 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6642 mov r9, rINST, lsr #8 @ r9<- AA 6643 and r2, r3, #255 @ r2<- BB 6644 GET_VREG(r0, r2) @ r0<- vBB 6645 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6646 .if 0 6647 @cmp r1, #0 @ is second operand zero? 6648 beq common_errDivideByZero 6649 .endif 6650 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6651 6652 @ optional op; may set condition codes 6653 add r0, r0, r1 @ r0<- op, r0-r3 changed 6654 GET_INST_OPCODE(ip) @ extract opcode from rINST 6655 SET_VREG(r0, r9) @ vAA<- r0 6656 GOTO_OPCODE(ip) @ jump to next instruction 6657 /* 10-12 instructions */ 6658 6659 6660/* ------------------------------ */ 6661 .balign 64 6662.L_OP_RSUB_INT_LIT8: /* 0xd9 */ 6663/* File: armv5te/OP_RSUB_INT_LIT8.S */ 6664/* File: armv5te/binopLit8.S */ 6665 /* 6666 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6667 * that specifies an instruction that performs "result = r0 op r1". 6668 * This could be an ARM instruction or a function call. (If the result 6669 * comes back in a register other than r0, you can override "result".) 6670 * 6671 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6672 * vCC (r1). Useful for integer division and modulus. 6673 * 6674 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6675 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6676 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6677 */ 6678 /* binop/lit8 vAA, vBB, #+CC */ 6679 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6680 mov r9, rINST, lsr #8 @ r9<- AA 6681 and r2, r3, #255 @ r2<- BB 6682 GET_VREG(r0, r2) @ r0<- vBB 6683 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6684 .if 0 6685 @cmp r1, #0 @ is second operand zero? 6686 beq common_errDivideByZero 6687 .endif 6688 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6689 6690 @ optional op; may set condition codes 6691 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6692 GET_INST_OPCODE(ip) @ extract opcode from rINST 6693 SET_VREG(r0, r9) @ vAA<- r0 6694 GOTO_OPCODE(ip) @ jump to next instruction 6695 /* 10-12 instructions */ 6696 6697 6698/* ------------------------------ */ 6699 .balign 64 6700.L_OP_MUL_INT_LIT8: /* 0xda */ 6701/* File: armv5te/OP_MUL_INT_LIT8.S */ 6702/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6703/* File: armv5te/binopLit8.S */ 6704 /* 6705 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6706 * that specifies an instruction that performs "result = r0 op r1". 6707 * This could be an ARM instruction or a function call. (If the result 6708 * comes back in a register other than r0, you can override "result".) 6709 * 6710 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6711 * vCC (r1). Useful for integer division and modulus. 6712 * 6713 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6714 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6715 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6716 */ 6717 /* binop/lit8 vAA, vBB, #+CC */ 6718 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6719 mov r9, rINST, lsr #8 @ r9<- AA 6720 and r2, r3, #255 @ r2<- BB 6721 GET_VREG(r0, r2) @ r0<- vBB 6722 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6723 .if 0 6724 @cmp r1, #0 @ is second operand zero? 6725 beq common_errDivideByZero 6726 .endif 6727 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6728 6729 @ optional op; may set condition codes 6730 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6731 GET_INST_OPCODE(ip) @ extract opcode from rINST 6732 SET_VREG(r0, r9) @ vAA<- r0 6733 GOTO_OPCODE(ip) @ jump to next instruction 6734 /* 10-12 instructions */ 6735 6736 6737/* ------------------------------ */ 6738 .balign 64 6739.L_OP_DIV_INT_LIT8: /* 0xdb */ 6740/* File: armv5te/OP_DIV_INT_LIT8.S */ 6741/* File: armv5te/binopLit8.S */ 6742 /* 6743 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6744 * that specifies an instruction that performs "result = r0 op r1". 6745 * This could be an ARM instruction or a function call. (If the result 6746 * comes back in a register other than r0, you can override "result".) 6747 * 6748 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6749 * vCC (r1). Useful for integer division and modulus. 6750 * 6751 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6752 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6753 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6754 */ 6755 /* binop/lit8 vAA, vBB, #+CC */ 6756 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6757 mov r9, rINST, lsr #8 @ r9<- AA 6758 and r2, r3, #255 @ r2<- BB 6759 GET_VREG(r0, r2) @ r0<- vBB 6760 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6761 .if 1 6762 @cmp r1, #0 @ is second operand zero? 6763 beq common_errDivideByZero 6764 .endif 6765 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6766 6767 @ optional op; may set condition codes 6768 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6769 GET_INST_OPCODE(ip) @ extract opcode from rINST 6770 SET_VREG(r0, r9) @ vAA<- r0 6771 GOTO_OPCODE(ip) @ jump to next instruction 6772 /* 10-12 instructions */ 6773 6774 6775/* ------------------------------ */ 6776 .balign 64 6777.L_OP_REM_INT_LIT8: /* 0xdc */ 6778/* File: armv5te/OP_REM_INT_LIT8.S */ 6779/* idivmod returns quotient in r0 and remainder in r1 */ 6780/* File: armv5te/binopLit8.S */ 6781 /* 6782 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6783 * that specifies an instruction that performs "result = r0 op r1". 6784 * This could be an ARM instruction or a function call. (If the result 6785 * comes back in a register other than r0, you can override "result".) 6786 * 6787 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6788 * vCC (r1). Useful for integer division and modulus. 6789 * 6790 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6791 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6792 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6793 */ 6794 /* binop/lit8 vAA, vBB, #+CC */ 6795 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6796 mov r9, rINST, lsr #8 @ r9<- AA 6797 and r2, r3, #255 @ r2<- BB 6798 GET_VREG(r0, r2) @ r0<- vBB 6799 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6800 .if 1 6801 @cmp r1, #0 @ is second operand zero? 6802 beq common_errDivideByZero 6803 .endif 6804 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6805 6806 @ optional op; may set condition codes 6807 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6808 GET_INST_OPCODE(ip) @ extract opcode from rINST 6809 SET_VREG(r1, r9) @ vAA<- r1 6810 GOTO_OPCODE(ip) @ jump to next instruction 6811 /* 10-12 instructions */ 6812 6813 6814/* ------------------------------ */ 6815 .balign 64 6816.L_OP_AND_INT_LIT8: /* 0xdd */ 6817/* File: armv5te/OP_AND_INT_LIT8.S */ 6818/* File: armv5te/binopLit8.S */ 6819 /* 6820 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6821 * that specifies an instruction that performs "result = r0 op r1". 6822 * This could be an ARM instruction or a function call. (If the result 6823 * comes back in a register other than r0, you can override "result".) 6824 * 6825 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6826 * vCC (r1). Useful for integer division and modulus. 6827 * 6828 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6829 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6830 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6831 */ 6832 /* binop/lit8 vAA, vBB, #+CC */ 6833 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6834 mov r9, rINST, lsr #8 @ r9<- AA 6835 and r2, r3, #255 @ r2<- BB 6836 GET_VREG(r0, r2) @ r0<- vBB 6837 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6838 .if 0 6839 @cmp r1, #0 @ is second operand zero? 6840 beq common_errDivideByZero 6841 .endif 6842 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6843 6844 @ optional op; may set condition codes 6845 and r0, r0, r1 @ r0<- op, r0-r3 changed 6846 GET_INST_OPCODE(ip) @ extract opcode from rINST 6847 SET_VREG(r0, r9) @ vAA<- r0 6848 GOTO_OPCODE(ip) @ jump to next instruction 6849 /* 10-12 instructions */ 6850 6851 6852/* ------------------------------ */ 6853 .balign 64 6854.L_OP_OR_INT_LIT8: /* 0xde */ 6855/* File: armv5te/OP_OR_INT_LIT8.S */ 6856/* File: armv5te/binopLit8.S */ 6857 /* 6858 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6859 * that specifies an instruction that performs "result = r0 op r1". 6860 * This could be an ARM instruction or a function call. (If the result 6861 * comes back in a register other than r0, you can override "result".) 6862 * 6863 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6864 * vCC (r1). Useful for integer division and modulus. 6865 * 6866 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6867 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6868 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6869 */ 6870 /* binop/lit8 vAA, vBB, #+CC */ 6871 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6872 mov r9, rINST, lsr #8 @ r9<- AA 6873 and r2, r3, #255 @ r2<- BB 6874 GET_VREG(r0, r2) @ r0<- vBB 6875 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6876 .if 0 6877 @cmp r1, #0 @ is second operand zero? 6878 beq common_errDivideByZero 6879 .endif 6880 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6881 6882 @ optional op; may set condition codes 6883 orr r0, r0, r1 @ r0<- op, r0-r3 changed 6884 GET_INST_OPCODE(ip) @ extract opcode from rINST 6885 SET_VREG(r0, r9) @ vAA<- r0 6886 GOTO_OPCODE(ip) @ jump to next instruction 6887 /* 10-12 instructions */ 6888 6889 6890/* ------------------------------ */ 6891 .balign 64 6892.L_OP_XOR_INT_LIT8: /* 0xdf */ 6893/* File: armv5te/OP_XOR_INT_LIT8.S */ 6894/* File: armv5te/binopLit8.S */ 6895 /* 6896 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6897 * that specifies an instruction that performs "result = r0 op r1". 6898 * This could be an ARM instruction or a function call. (If the result 6899 * comes back in a register other than r0, you can override "result".) 6900 * 6901 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6902 * vCC (r1). Useful for integer division and modulus. 6903 * 6904 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6905 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6906 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6907 */ 6908 /* binop/lit8 vAA, vBB, #+CC */ 6909 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6910 mov r9, rINST, lsr #8 @ r9<- AA 6911 and r2, r3, #255 @ r2<- BB 6912 GET_VREG(r0, r2) @ r0<- vBB 6913 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6914 .if 0 6915 @cmp r1, #0 @ is second operand zero? 6916 beq common_errDivideByZero 6917 .endif 6918 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6919 6920 @ optional op; may set condition codes 6921 eor r0, r0, r1 @ r0<- op, r0-r3 changed 6922 GET_INST_OPCODE(ip) @ extract opcode from rINST 6923 SET_VREG(r0, r9) @ vAA<- r0 6924 GOTO_OPCODE(ip) @ jump to next instruction 6925 /* 10-12 instructions */ 6926 6927 6928/* ------------------------------ */ 6929 .balign 64 6930.L_OP_SHL_INT_LIT8: /* 0xe0 */ 6931/* File: armv5te/OP_SHL_INT_LIT8.S */ 6932/* File: armv5te/binopLit8.S */ 6933 /* 6934 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6935 * that specifies an instruction that performs "result = r0 op r1". 6936 * This could be an ARM instruction or a function call. (If the result 6937 * comes back in a register other than r0, you can override "result".) 6938 * 6939 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6940 * vCC (r1). Useful for integer division and modulus. 6941 * 6942 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6943 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6944 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6945 */ 6946 /* binop/lit8 vAA, vBB, #+CC */ 6947 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6948 mov r9, rINST, lsr #8 @ r9<- AA 6949 and r2, r3, #255 @ r2<- BB 6950 GET_VREG(r0, r2) @ r0<- vBB 6951 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6952 .if 0 6953 @cmp r1, #0 @ is second operand zero? 6954 beq common_errDivideByZero 6955 .endif 6956 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6957 6958 and r1, r1, #31 @ optional op; may set condition codes 6959 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 6960 GET_INST_OPCODE(ip) @ extract opcode from rINST 6961 SET_VREG(r0, r9) @ vAA<- r0 6962 GOTO_OPCODE(ip) @ jump to next instruction 6963 /* 10-12 instructions */ 6964 6965 6966/* ------------------------------ */ 6967 .balign 64 6968.L_OP_SHR_INT_LIT8: /* 0xe1 */ 6969/* File: armv5te/OP_SHR_INT_LIT8.S */ 6970/* File: armv5te/binopLit8.S */ 6971 /* 6972 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6973 * that specifies an instruction that performs "result = r0 op r1". 6974 * This could be an ARM instruction or a function call. (If the result 6975 * comes back in a register other than r0, you can override "result".) 6976 * 6977 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6978 * vCC (r1). Useful for integer division and modulus. 6979 * 6980 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6981 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6982 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6983 */ 6984 /* binop/lit8 vAA, vBB, #+CC */ 6985 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6986 mov r9, rINST, lsr #8 @ r9<- AA 6987 and r2, r3, #255 @ r2<- BB 6988 GET_VREG(r0, r2) @ r0<- vBB 6989 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6990 .if 0 6991 @cmp r1, #0 @ is second operand zero? 6992 beq common_errDivideByZero 6993 .endif 6994 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6995 6996 and r1, r1, #31 @ optional op; may set condition codes 6997 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 6998 GET_INST_OPCODE(ip) @ extract opcode from rINST 6999 SET_VREG(r0, r9) @ vAA<- r0 7000 GOTO_OPCODE(ip) @ jump to next instruction 7001 /* 10-12 instructions */ 7002 7003 7004/* ------------------------------ */ 7005 .balign 64 7006.L_OP_USHR_INT_LIT8: /* 0xe2 */ 7007/* File: armv5te/OP_USHR_INT_LIT8.S */ 7008/* File: armv5te/binopLit8.S */ 7009 /* 7010 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7011 * that specifies an instruction that performs "result = r0 op r1". 7012 * This could be an ARM instruction or a function call. (If the result 7013 * comes back in a register other than r0, you can override "result".) 7014 * 7015 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7016 * vCC (r1). Useful for integer division and modulus. 7017 * 7018 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7019 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7020 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7021 */ 7022 /* binop/lit8 vAA, vBB, #+CC */ 7023 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7024 mov r9, rINST, lsr #8 @ r9<- AA 7025 and r2, r3, #255 @ r2<- BB 7026 GET_VREG(r0, r2) @ r0<- vBB 7027 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7028 .if 0 7029 @cmp r1, #0 @ is second operand zero? 7030 beq common_errDivideByZero 7031 .endif 7032 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7033 7034 and r1, r1, #31 @ optional op; may set condition codes 7035 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 7036 GET_INST_OPCODE(ip) @ extract opcode from rINST 7037 SET_VREG(r0, r9) @ vAA<- r0 7038 GOTO_OPCODE(ip) @ jump to next instruction 7039 /* 10-12 instructions */ 7040 7041 7042/* ------------------------------ */ 7043 .balign 64 7044.L_OP_IGET_VOLATILE: /* 0xe3 */ 7045/* File: armv5te/OP_IGET_VOLATILE.S */ 7046/* File: armv5te/OP_IGET.S */ 7047 /* 7048 * General 32-bit instance field get. 7049 * 7050 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 7051 */ 7052 /* op vA, vB, field@CCCC */ 7053 mov r0, rINST, lsr #12 @ r0<- B 7054 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7055 FETCH(r1, 1) @ r1<- field ref CCCC 7056 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 7057 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7058 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7059 cmp r0, #0 @ is resolved entry null? 7060 bne .LOP_IGET_VOLATILE_finish @ no, already resolved 70618: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7062 EXPORT_PC() @ resolve() could throw 7063 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7064 bl dvmResolveInstField @ r0<- resolved InstField ptr 7065 cmp r0, #0 7066 bne .LOP_IGET_VOLATILE_finish 7067 b common_exceptionThrown 7068 7069 7070/* ------------------------------ */ 7071 .balign 64 7072.L_OP_IPUT_VOLATILE: /* 0xe4 */ 7073/* File: armv5te/OP_IPUT_VOLATILE.S */ 7074/* File: armv5te/OP_IPUT.S */ 7075 /* 7076 * General 32-bit instance field put. 7077 * 7078 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 7079 */ 7080 /* op vA, vB, field@CCCC */ 7081 mov r0, rINST, lsr #12 @ r0<- B 7082 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7083 FETCH(r1, 1) @ r1<- field ref CCCC 7084 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 7085 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7086 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7087 cmp r0, #0 @ is resolved entry null? 7088 bne .LOP_IPUT_VOLATILE_finish @ no, already resolved 70898: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7090 EXPORT_PC() @ resolve() could throw 7091 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7092 bl dvmResolveInstField @ r0<- resolved InstField ptr 7093 cmp r0, #0 @ success? 7094 bne .LOP_IPUT_VOLATILE_finish @ yes, finish up 7095 b common_exceptionThrown 7096 7097 7098/* ------------------------------ */ 7099 .balign 64 7100.L_OP_SGET_VOLATILE: /* 0xe5 */ 7101/* File: armv5te/OP_SGET_VOLATILE.S */ 7102/* File: armv5te/OP_SGET.S */ 7103 /* 7104 * General 32-bit SGET handler. 7105 * 7106 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 7107 */ 7108 /* op vAA, field@BBBB */ 7109 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7110 FETCH(r1, 1) @ r1<- field ref BBBB 7111 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7112 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7113 cmp r0, #0 @ is resolved entry null? 7114 beq .LOP_SGET_VOLATILE_resolve @ yes, do resolve 7115.LOP_SGET_VOLATILE_finish: @ field ptr in r0 7116 ldr r1, [r0, #offStaticField_value] @ r1<- field value 7117 SMP_DMB @ acquiring load 7118 mov r2, rINST, lsr #8 @ r2<- AA 7119 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7120 SET_VREG(r1, r2) @ fp[AA]<- r1 7121 GET_INST_OPCODE(ip) @ extract opcode from rINST 7122 GOTO_OPCODE(ip) @ jump to next instruction 7123 7124 7125/* ------------------------------ */ 7126 .balign 64 7127.L_OP_SPUT_VOLATILE: /* 0xe6 */ 7128/* File: armv5te/OP_SPUT_VOLATILE.S */ 7129/* File: armv5te/OP_SPUT.S */ 7130 /* 7131 * General 32-bit SPUT handler. 7132 * 7133 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 7134 */ 7135 /* op vAA, field@BBBB */ 7136 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7137 FETCH(r1, 1) @ r1<- field ref BBBB 7138 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7139 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7140 cmp r0, #0 @ is resolved entry null? 7141 beq .LOP_SPUT_VOLATILE_resolve @ yes, do resolve 7142.LOP_SPUT_VOLATILE_finish: @ field ptr in r0 7143 mov r2, rINST, lsr #8 @ r2<- AA 7144 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7145 GET_VREG(r1, r2) @ r1<- fp[AA] 7146 GET_INST_OPCODE(ip) @ extract opcode from rINST 7147 SMP_DMB @ releasing store 7148 str r1, [r0, #offStaticField_value] @ field<- vAA 7149 GOTO_OPCODE(ip) @ jump to next instruction 7150 7151 7152/* ------------------------------ */ 7153 .balign 64 7154.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */ 7155/* File: armv5te/OP_IGET_OBJECT_VOLATILE.S */ 7156/* File: armv5te/OP_IGET.S */ 7157 /* 7158 * General 32-bit instance field get. 7159 * 7160 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 7161 */ 7162 /* op vA, vB, field@CCCC */ 7163 mov r0, rINST, lsr #12 @ r0<- B 7164 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7165 FETCH(r1, 1) @ r1<- field ref CCCC 7166 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 7167 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7168 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7169 cmp r0, #0 @ is resolved entry null? 7170 bne .LOP_IGET_OBJECT_VOLATILE_finish @ no, already resolved 71718: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7172 EXPORT_PC() @ resolve() could throw 7173 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7174 bl dvmResolveInstField @ r0<- resolved InstField ptr 7175 cmp r0, #0 7176 bne .LOP_IGET_OBJECT_VOLATILE_finish 7177 b common_exceptionThrown 7178 7179 7180/* ------------------------------ */ 7181 .balign 64 7182.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */ 7183/* File: armv5te/OP_IGET_WIDE_VOLATILE.S */ 7184/* File: armv5te/OP_IGET_WIDE.S */ 7185 /* 7186 * Wide 32-bit instance field get. 7187 */ 7188 /* iget-wide vA, vB, field@CCCC */ 7189 mov r0, rINST, lsr #12 @ r0<- B 7190 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7191 FETCH(r1, 1) @ r1<- field ref CCCC 7192 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 7193 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7194 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7195 cmp r0, #0 @ is resolved entry null? 7196 bne .LOP_IGET_WIDE_VOLATILE_finish @ no, already resolved 71978: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7198 EXPORT_PC() @ resolve() could throw 7199 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7200 bl dvmResolveInstField @ r0<- resolved InstField ptr 7201 cmp r0, #0 7202 bne .LOP_IGET_WIDE_VOLATILE_finish 7203 b common_exceptionThrown 7204 7205 7206/* ------------------------------ */ 7207 .balign 64 7208.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */ 7209/* File: armv5te/OP_IPUT_WIDE_VOLATILE.S */ 7210/* File: armv5te/OP_IPUT_WIDE.S */ 7211 /* iput-wide vA, vB, field@CCCC */ 7212 mov r0, rINST, lsr #12 @ r0<- B 7213 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7214 FETCH(r1, 1) @ r1<- field ref CCCC 7215 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 7216 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7217 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7218 cmp r0, #0 @ is resolved entry null? 7219 bne .LOP_IPUT_WIDE_VOLATILE_finish @ no, already resolved 72208: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7221 EXPORT_PC() @ resolve() could throw 7222 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7223 bl dvmResolveInstField @ r0<- resolved InstField ptr 7224 cmp r0, #0 @ success? 7225 bne .LOP_IPUT_WIDE_VOLATILE_finish @ yes, finish up 7226 b common_exceptionThrown 7227 7228 7229/* ------------------------------ */ 7230 .balign 64 7231.L_OP_SGET_WIDE_VOLATILE: /* 0xea */ 7232/* File: armv5te/OP_SGET_WIDE_VOLATILE.S */ 7233/* File: armv5te/OP_SGET_WIDE.S */ 7234 /* 7235 * 64-bit SGET handler. 7236 */ 7237 /* sget-wide vAA, field@BBBB */ 7238 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7239 FETCH(r1, 1) @ r1<- field ref BBBB 7240 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7241 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7242 cmp r0, #0 @ is resolved entry null? 7243 beq .LOP_SGET_WIDE_VOLATILE_resolve @ yes, do resolve 7244.LOP_SGET_WIDE_VOLATILE_finish: 7245 mov r9, rINST, lsr #8 @ r9<- AA 7246 .if 1 7247 add r0, r0, #offStaticField_value @ r0<- pointer to data 7248 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 7249 .else 7250 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 7251 .endif 7252 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 7253 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7254 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 7255 GET_INST_OPCODE(ip) @ extract opcode from rINST 7256 GOTO_OPCODE(ip) @ jump to next instruction 7257 7258 7259/* ------------------------------ */ 7260 .balign 64 7261.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */ 7262/* File: armv5te/OP_SPUT_WIDE_VOLATILE.S */ 7263/* File: armv5te/OP_SPUT_WIDE.S */ 7264 /* 7265 * 64-bit SPUT handler. 7266 */ 7267 /* sput-wide vAA, field@BBBB */ 7268 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 7269 FETCH(r1, 1) @ r1<- field ref BBBB 7270 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 7271 mov r9, rINST, lsr #8 @ r9<- AA 7272 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 7273 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 7274 cmp r2, #0 @ is resolved entry null? 7275 beq .LOP_SPUT_WIDE_VOLATILE_resolve @ yes, do resolve 7276.LOP_SPUT_WIDE_VOLATILE_finish: @ field ptr in r2, AA in r9 7277 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7278 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 7279 GET_INST_OPCODE(r10) @ extract opcode from rINST 7280 .if 1 7281 add r2, r2, #offStaticField_value @ r2<- pointer to data 7282 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 7283 .else 7284 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 7285 .endif 7286 GOTO_OPCODE(r10) @ jump to next instruction 7287 7288 7289/* ------------------------------ */ 7290 .balign 64 7291.L_OP_BREAKPOINT: /* 0xec */ 7292/* File: armv5te/OP_BREAKPOINT.S */ 7293/* File: armv5te/unused.S */ 7294 bl common_abort 7295 7296 7297/* ------------------------------ */ 7298 .balign 64 7299.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */ 7300/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */ 7301 /* 7302 * Handle a throw-verification-error instruction. This throws an 7303 * exception for an error discovered during verification. The 7304 * exception is indicated by AA, with some detail provided by BBBB. 7305 */ 7306 /* op AA, ref@BBBB */ 7307 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7308 FETCH(r2, 1) @ r2<- BBBB 7309 EXPORT_PC() @ export the PC 7310 mov r1, rINST, lsr #8 @ r1<- AA 7311 bl dvmThrowVerificationError @ always throws 7312 b common_exceptionThrown @ handle exception 7313 7314/* ------------------------------ */ 7315 .balign 64 7316.L_OP_EXECUTE_INLINE: /* 0xee */ 7317/* File: armv5te/OP_EXECUTE_INLINE.S */ 7318 /* 7319 * Execute a "native inline" instruction. 7320 * 7321 * We need to call an InlineOp4Func: 7322 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7323 * 7324 * The first four args are in r0-r3, pointer to return value storage 7325 * is on the stack. The function's return value is a flag that tells 7326 * us if an exception was thrown. 7327 */ 7328 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */ 7329 FETCH(r10, 1) @ r10<- BBBB 7330 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7331 EXPORT_PC() @ can throw 7332 sub sp, sp, #8 @ make room for arg, +64 bit align 7333 mov r0, rINST, lsr #12 @ r0<- B 7334 str r1, [sp] @ push &glue->retval 7335 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after 7336 add sp, sp, #8 @ pop stack 7337 cmp r0, #0 @ test boolean result of inline 7338 beq common_exceptionThrown @ returned false, handle exception 7339 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7340 GET_INST_OPCODE(ip) @ extract opcode from rINST 7341 GOTO_OPCODE(ip) @ jump to next instruction 7342 7343/* ------------------------------ */ 7344 .balign 64 7345.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */ 7346/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */ 7347 /* 7348 * Execute a "native inline" instruction, using "/range" semantics. 7349 * Same idea as execute-inline, but we get the args differently. 7350 * 7351 * We need to call an InlineOp4Func: 7352 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7353 * 7354 * The first four args are in r0-r3, pointer to return value storage 7355 * is on the stack. The function's return value is a flag that tells 7356 * us if an exception was thrown. 7357 */ 7358 /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */ 7359 FETCH(r10, 1) @ r10<- BBBB 7360 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7361 EXPORT_PC() @ can throw 7362 sub sp, sp, #8 @ make room for arg, +64 bit align 7363 mov r0, rINST, lsr #8 @ r0<- AA 7364 str r1, [sp] @ push &glue->retval 7365 bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after 7366 add sp, sp, #8 @ pop stack 7367 cmp r0, #0 @ test boolean result of inline 7368 beq common_exceptionThrown @ returned false, handle exception 7369 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7370 GET_INST_OPCODE(ip) @ extract opcode from rINST 7371 GOTO_OPCODE(ip) @ jump to next instruction 7372 7373/* ------------------------------ */ 7374 .balign 64 7375.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7376/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7377 /* 7378 * invoke-direct-empty is a no-op in a "standard" interpreter. 7379 */ 7380 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7381 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7382 GOTO_OPCODE(ip) @ execute it 7383 7384/* ------------------------------ */ 7385 .balign 64 7386.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */ 7387/* File: armv5te/OP_RETURN_VOID_BARRIER.S */ 7388 SMP_DMB_ST 7389 b common_returnFromMethod 7390 7391/* ------------------------------ */ 7392 .balign 64 7393.L_OP_IGET_QUICK: /* 0xf2 */ 7394/* File: armv6t2/OP_IGET_QUICK.S */ 7395 /* For: iget-quick, iget-object-quick */ 7396 /* op vA, vB, offset@CCCC */ 7397 mov r2, rINST, lsr #12 @ r2<- B 7398 FETCH(r1, 1) @ r1<- field byte offset 7399 GET_VREG(r3, r2) @ r3<- object we're operating on 7400 ubfx r2, rINST, #8, #4 @ r2<- A 7401 cmp r3, #0 @ check object for null 7402 beq common_errNullObject @ object was null 7403 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7404 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7405 GET_INST_OPCODE(ip) @ extract opcode from rINST 7406 SET_VREG(r0, r2) @ fp[A]<- r0 7407 GOTO_OPCODE(ip) @ jump to next instruction 7408 7409/* ------------------------------ */ 7410 .balign 64 7411.L_OP_IGET_WIDE_QUICK: /* 0xf3 */ 7412/* File: armv6t2/OP_IGET_WIDE_QUICK.S */ 7413 /* iget-wide-quick vA, vB, offset@CCCC */ 7414 mov r2, rINST, lsr #12 @ r2<- B 7415 FETCH(ip, 1) @ ip<- field byte offset 7416 GET_VREG(r3, r2) @ r3<- object we're operating on 7417 ubfx r2, rINST, #8, #4 @ r2<- A 7418 cmp r3, #0 @ check object for null 7419 beq common_errNullObject @ object was null 7420 ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned) 7421 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7422 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 7423 GET_INST_OPCODE(ip) @ extract opcode from rINST 7424 stmia r3, {r0-r1} @ fp[A]<- r0/r1 7425 GOTO_OPCODE(ip) @ jump to next instruction 7426 7427/* ------------------------------ */ 7428 .balign 64 7429.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */ 7430/* File: armv5te/OP_IGET_OBJECT_QUICK.S */ 7431/* File: armv5te/OP_IGET_QUICK.S */ 7432 /* For: iget-quick, iget-object-quick */ 7433 /* op vA, vB, offset@CCCC */ 7434 mov r2, rINST, lsr #12 @ r2<- B 7435 GET_VREG(r3, r2) @ r3<- object we're operating on 7436 FETCH(r1, 1) @ r1<- field byte offset 7437 cmp r3, #0 @ check object for null 7438 mov r2, rINST, lsr #8 @ r2<- A(+) 7439 beq common_errNullObject @ object was null 7440 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7441 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7442 and r2, r2, #15 7443 GET_INST_OPCODE(ip) @ extract opcode from rINST 7444 SET_VREG(r0, r2) @ fp[A]<- r0 7445 GOTO_OPCODE(ip) @ jump to next instruction 7446 7447 7448/* ------------------------------ */ 7449 .balign 64 7450.L_OP_IPUT_QUICK: /* 0xf5 */ 7451/* File: armv6t2/OP_IPUT_QUICK.S */ 7452 /* For: iput-quick, iput-object-quick */ 7453 /* op vA, vB, offset@CCCC */ 7454 mov r2, rINST, lsr #12 @ r2<- B 7455 FETCH(r1, 1) @ r1<- field byte offset 7456 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7457 ubfx r2, rINST, #8, #4 @ r2<- A 7458 cmp r3, #0 @ check object for null 7459 beq common_errNullObject @ object was null 7460 GET_VREG(r0, r2) @ r0<- fp[A] 7461 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7462 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7463 GET_INST_OPCODE(ip) @ extract opcode from rINST 7464 GOTO_OPCODE(ip) @ jump to next instruction 7465 7466/* ------------------------------ */ 7467 .balign 64 7468.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7469/* File: armv6t2/OP_IPUT_WIDE_QUICK.S */ 7470 /* iput-wide-quick vA, vB, offset@CCCC */ 7471 mov r1, rINST, lsr #12 @ r1<- B 7472 ubfx r0, rINST, #8, #4 @ r0<- A 7473 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7474 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7475 cmp r2, #0 @ check object for null 7476 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7477 beq common_errNullObject @ object was null 7478 FETCH(r3, 1) @ r3<- field byte offset 7479 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7480 strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1 7481 GET_INST_OPCODE(ip) @ extract opcode from rINST 7482 GOTO_OPCODE(ip) @ jump to next instruction 7483 7484/* ------------------------------ */ 7485 .balign 64 7486.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7487/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7488 /* For: iput-object-quick */ 7489 /* op vA, vB, offset@CCCC */ 7490 mov r2, rINST, lsr #12 @ r2<- B 7491 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7492 FETCH(r1, 1) @ r1<- field byte offset 7493 cmp r3, #0 @ check object for null 7494 mov r2, rINST, lsr #8 @ r2<- A(+) 7495 beq common_errNullObject @ object was null 7496 and r2, r2, #15 7497 GET_VREG(r0, r2) @ r0<- fp[A] 7498 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 7499 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7500 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7501 cmp r0, #0 7502 strneb r2, [r2, r3, lsr #GC_CARD_SHIFT] @ mark card based on obj head 7503 GET_INST_OPCODE(ip) @ extract opcode from rINST 7504 GOTO_OPCODE(ip) @ jump to next instruction 7505 7506/* ------------------------------ */ 7507 .balign 64 7508.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */ 7509/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7510 /* 7511 * Handle an optimized virtual method call. 7512 * 7513 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7514 */ 7515 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7516 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7517 FETCH(r3, 2) @ r3<- FEDC or CCCC 7518 FETCH(r1, 1) @ r1<- BBBB 7519 .if (!0) 7520 and r3, r3, #15 @ r3<- C (or stays CCCC) 7521 .endif 7522 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7523 cmp r2, #0 @ is "this" null? 7524 beq common_errNullObject @ null "this", throw exception 7525 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7526 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7527 EXPORT_PC() @ invoke must export 7528 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7529 bl common_invokeMethodNoRange @ continue on 7530 7531/* ------------------------------ */ 7532 .balign 64 7533.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */ 7534/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */ 7535/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7536 /* 7537 * Handle an optimized virtual method call. 7538 * 7539 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7540 */ 7541 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7542 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7543 FETCH(r3, 2) @ r3<- FEDC or CCCC 7544 FETCH(r1, 1) @ r1<- BBBB 7545 .if (!1) 7546 and r3, r3, #15 @ r3<- C (or stays CCCC) 7547 .endif 7548 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7549 cmp r2, #0 @ is "this" null? 7550 beq common_errNullObject @ null "this", throw exception 7551 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7552 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7553 EXPORT_PC() @ invoke must export 7554 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7555 bl common_invokeMethodRange @ continue on 7556 7557 7558/* ------------------------------ */ 7559 .balign 64 7560.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */ 7561/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7562 /* 7563 * Handle an optimized "super" method call. 7564 * 7565 * for: [opt] invoke-super-quick, invoke-super-quick/range 7566 */ 7567 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7568 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7569 FETCH(r10, 2) @ r10<- GFED or CCCC 7570 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7571 .if (!0) 7572 and r10, r10, #15 @ r10<- D (or stays CCCC) 7573 .endif 7574 FETCH(r1, 1) @ r1<- BBBB 7575 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7576 EXPORT_PC() @ must export for invoke 7577 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7578 GET_VREG(r3, r10) @ r3<- "this" 7579 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7580 cmp r3, #0 @ null "this" ref? 7581 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7582 beq common_errNullObject @ "this" is null, throw exception 7583 bl common_invokeMethodNoRange @ continue on 7584 7585/* ------------------------------ */ 7586 .balign 64 7587.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */ 7588/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */ 7589/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7590 /* 7591 * Handle an optimized "super" method call. 7592 * 7593 * for: [opt] invoke-super-quick, invoke-super-quick/range 7594 */ 7595 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7596 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7597 FETCH(r10, 2) @ r10<- GFED or CCCC 7598 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7599 .if (!1) 7600 and r10, r10, #15 @ r10<- D (or stays CCCC) 7601 .endif 7602 FETCH(r1, 1) @ r1<- BBBB 7603 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7604 EXPORT_PC() @ must export for invoke 7605 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7606 GET_VREG(r3, r10) @ r3<- "this" 7607 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7608 cmp r3, #0 @ null "this" ref? 7609 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7610 beq common_errNullObject @ "this" is null, throw exception 7611 bl common_invokeMethodRange @ continue on 7612 7613 7614/* ------------------------------ */ 7615 .balign 64 7616.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */ 7617/* File: armv5te/OP_IPUT_OBJECT_VOLATILE.S */ 7618/* File: armv5te/OP_IPUT_OBJECT.S */ 7619 /* 7620 * 32-bit instance field put. 7621 * 7622 * for: iput-object, iput-object-volatile 7623 */ 7624 /* op vA, vB, field@CCCC */ 7625 mov r0, rINST, lsr #12 @ r0<- B 7626 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7627 FETCH(r1, 1) @ r1<- field ref CCCC 7628 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 7629 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7630 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7631 cmp r0, #0 @ is resolved entry null? 7632 bne .LOP_IPUT_OBJECT_VOLATILE_finish @ no, already resolved 76338: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7634 EXPORT_PC() @ resolve() could throw 7635 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7636 bl dvmResolveInstField @ r0<- resolved InstField ptr 7637 cmp r0, #0 @ success? 7638 bne .LOP_IPUT_OBJECT_VOLATILE_finish @ yes, finish up 7639 b common_exceptionThrown 7640 7641 7642/* ------------------------------ */ 7643 .balign 64 7644.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */ 7645/* File: armv5te/OP_SGET_OBJECT_VOLATILE.S */ 7646/* File: armv5te/OP_SGET.S */ 7647 /* 7648 * General 32-bit SGET handler. 7649 * 7650 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 7651 */ 7652 /* op vAA, field@BBBB */ 7653 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7654 FETCH(r1, 1) @ r1<- field ref BBBB 7655 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7656 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7657 cmp r0, #0 @ is resolved entry null? 7658 beq .LOP_SGET_OBJECT_VOLATILE_resolve @ yes, do resolve 7659.LOP_SGET_OBJECT_VOLATILE_finish: @ field ptr in r0 7660 ldr r1, [r0, #offStaticField_value] @ r1<- field value 7661 SMP_DMB @ acquiring load 7662 mov r2, rINST, lsr #8 @ r2<- AA 7663 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7664 SET_VREG(r1, r2) @ fp[AA]<- r1 7665 GET_INST_OPCODE(ip) @ extract opcode from rINST 7666 GOTO_OPCODE(ip) @ jump to next instruction 7667 7668 7669/* ------------------------------ */ 7670 .balign 64 7671.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */ 7672/* File: armv5te/OP_SPUT_OBJECT_VOLATILE.S */ 7673/* File: armv5te/OP_SPUT_OBJECT.S */ 7674 /* 7675 * 32-bit SPUT handler for objects 7676 * 7677 * for: sput-object, sput-object-volatile 7678 */ 7679 /* op vAA, field@BBBB */ 7680 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7681 FETCH(r1, 1) @ r1<- field ref BBBB 7682 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7683 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7684 cmp r0, #0 @ is resolved entry null? 7685 bne .LOP_SPUT_OBJECT_VOLATILE_finish @ no, continue 7686 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 7687 EXPORT_PC() @ resolve() could throw, so export now 7688 ldr r0, [r9, #offMethod_clazz] @ r0<- method->clazz 7689 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 7690 cmp r0, #0 @ success? 7691 bne .LOP_SPUT_OBJECT_VOLATILE_finish @ yes, finish 7692 b common_exceptionThrown @ no, handle exception 7693 7694 7695 7696/* ------------------------------ */ 7697 .balign 64 7698.L_OP_UNUSED_FF: /* 0xff */ 7699/* File: armv5te/OP_UNUSED_FF.S */ 7700/* File: armv5te/unused.S */ 7701 bl common_abort 7702 7703 7704 7705 .balign 64 7706 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 7707 .global dvmAsmInstructionEnd 7708dvmAsmInstructionEnd: 7709 7710/* 7711 * =========================================================================== 7712 * Sister implementations 7713 * =========================================================================== 7714 */ 7715 .global dvmAsmSisterStart 7716 .type dvmAsmSisterStart, %function 7717 .text 7718 .balign 4 7719dvmAsmSisterStart: 7720 7721/* continuation for OP_CONST_STRING */ 7722 7723 /* 7724 * Continuation if the String has not yet been resolved. 7725 * r1: BBBB (String ref) 7726 * r9: target register 7727 */ 7728.LOP_CONST_STRING_resolve: 7729 EXPORT_PC() 7730 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7731 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7732 bl dvmResolveString @ r0<- String reference 7733 cmp r0, #0 @ failed? 7734 beq common_exceptionThrown @ yup, handle the exception 7735 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7736 GET_INST_OPCODE(ip) @ extract opcode from rINST 7737 SET_VREG(r0, r9) @ vAA<- r0 7738 GOTO_OPCODE(ip) @ jump to next instruction 7739 7740/* continuation for OP_CONST_STRING_JUMBO */ 7741 7742 /* 7743 * Continuation if the String has not yet been resolved. 7744 * r1: BBBBBBBB (String ref) 7745 * r9: target register 7746 */ 7747.LOP_CONST_STRING_JUMBO_resolve: 7748 EXPORT_PC() 7749 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7750 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7751 bl dvmResolveString @ r0<- String reference 7752 cmp r0, #0 @ failed? 7753 beq common_exceptionThrown @ yup, handle the exception 7754 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7755 GET_INST_OPCODE(ip) @ extract opcode from rINST 7756 SET_VREG(r0, r9) @ vAA<- r0 7757 GOTO_OPCODE(ip) @ jump to next instruction 7758 7759/* continuation for OP_CONST_CLASS */ 7760 7761 /* 7762 * Continuation if the Class has not yet been resolved. 7763 * r1: BBBB (Class ref) 7764 * r9: target register 7765 */ 7766.LOP_CONST_CLASS_resolve: 7767 EXPORT_PC() 7768 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7769 mov r2, #1 @ r2<- true 7770 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7771 bl dvmResolveClass @ r0<- Class reference 7772 cmp r0, #0 @ failed? 7773 beq common_exceptionThrown @ yup, handle the exception 7774 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7775 GET_INST_OPCODE(ip) @ extract opcode from rINST 7776 SET_VREG(r0, r9) @ vAA<- r0 7777 GOTO_OPCODE(ip) @ jump to next instruction 7778 7779/* continuation for OP_CHECK_CAST */ 7780 7781 /* 7782 * Trivial test failed, need to perform full check. This is common. 7783 * r0 holds obj->clazz 7784 * r1 holds desired class resolved from BBBB 7785 * r9 holds object 7786 */ 7787.LOP_CHECK_CAST_fullcheck: 7788 mov r10, r1 @ avoid ClassObject getting clobbered 7789 bl dvmInstanceofNonTrivial @ r0<- boolean result 7790 cmp r0, #0 @ failed? 7791 bne .LOP_CHECK_CAST_okay @ no, success 7792 7793 @ A cast has failed. We need to throw a ClassCastException. 7794 EXPORT_PC() @ about to throw 7795 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz (actual class) 7796 mov r1, r10 @ r1<- desired class 7797 bl dvmThrowClassCastException 7798 b common_exceptionThrown 7799 7800 /* 7801 * Resolution required. This is the least-likely path. 7802 * 7803 * r2 holds BBBB 7804 * r9 holds object 7805 */ 7806.LOP_CHECK_CAST_resolve: 7807 EXPORT_PC() @ resolve() could throw 7808 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7809 mov r1, r2 @ r1<- BBBB 7810 mov r2, #0 @ r2<- false 7811 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7812 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7813 cmp r0, #0 @ got null? 7814 beq common_exceptionThrown @ yes, handle exception 7815 mov r1, r0 @ r1<- class resolved from BBB 7816 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 7817 b .LOP_CHECK_CAST_resolved @ pick up where we left off 7818 7819/* continuation for OP_INSTANCE_OF */ 7820 7821 /* 7822 * Trivial test failed, need to perform full check. This is common. 7823 * r0 holds obj->clazz 7824 * r1 holds class resolved from BBBB 7825 * r9 holds A 7826 */ 7827.LOP_INSTANCE_OF_fullcheck: 7828 bl dvmInstanceofNonTrivial @ r0<- boolean result 7829 @ fall through to OP_INSTANCE_OF_store 7830 7831 /* 7832 * r0 holds boolean result 7833 * r9 holds A 7834 */ 7835.LOP_INSTANCE_OF_store: 7836 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7837 SET_VREG(r0, r9) @ vA<- r0 7838 GET_INST_OPCODE(ip) @ extract opcode from rINST 7839 GOTO_OPCODE(ip) @ jump to next instruction 7840 7841 /* 7842 * Trivial test succeeded, save and bail. 7843 * r9 holds A 7844 */ 7845.LOP_INSTANCE_OF_trivial: 7846 mov r0, #1 @ indicate success 7847 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper 7848 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7849 SET_VREG(r0, r9) @ vA<- r0 7850 GET_INST_OPCODE(ip) @ extract opcode from rINST 7851 GOTO_OPCODE(ip) @ jump to next instruction 7852 7853 /* 7854 * Resolution required. This is the least-likely path. 7855 * 7856 * r3 holds BBBB 7857 * r9 holds A 7858 */ 7859.LOP_INSTANCE_OF_resolve: 7860 EXPORT_PC() @ resolve() could throw 7861 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7862 mov r1, r3 @ r1<- BBBB 7863 mov r2, #1 @ r2<- true 7864 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7865 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7866 cmp r0, #0 @ got null? 7867 beq common_exceptionThrown @ yes, handle exception 7868 mov r1, r0 @ r1<- class resolved from BBB 7869 mov r3, rINST, lsr #12 @ r3<- B 7870 GET_VREG(r0, r3) @ r0<- vB (object) 7871 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 7872 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 7873 7874/* continuation for OP_NEW_INSTANCE */ 7875 7876 .balign 32 @ minimize cache lines 7877.LOP_NEW_INSTANCE_finish: @ r0=new object 7878 mov r3, rINST, lsr #8 @ r3<- AA 7879 cmp r0, #0 @ failed? 7880 beq common_exceptionThrown @ yes, handle the exception 7881 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7882 GET_INST_OPCODE(ip) @ extract opcode from rINST 7883 SET_VREG(r0, r3) @ vAA<- r0 7884 GOTO_OPCODE(ip) @ jump to next instruction 7885 7886 /* 7887 * Class initialization required. 7888 * 7889 * r0 holds class object 7890 */ 7891.LOP_NEW_INSTANCE_needinit: 7892 mov r9, r0 @ save r0 7893 bl dvmInitClass @ initialize class 7894 cmp r0, #0 @ check boolean result 7895 mov r0, r9 @ restore r0 7896 bne .LOP_NEW_INSTANCE_initialized @ success, continue 7897 b common_exceptionThrown @ failed, deal with init exception 7898 7899 /* 7900 * Resolution required. This is the least-likely path. 7901 * 7902 * r1 holds BBBB 7903 */ 7904.LOP_NEW_INSTANCE_resolve: 7905 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7906 mov r2, #0 @ r2<- false 7907 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7908 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7909 cmp r0, #0 @ got null? 7910 bne .LOP_NEW_INSTANCE_resolved @ no, continue 7911 b common_exceptionThrown @ yes, handle exception 7912 7913.LstrInstantiationErrorPtr: 7914 .word .LstrInstantiationError 7915 7916/* continuation for OP_NEW_ARRAY */ 7917 7918 7919 /* 7920 * Resolve class. (This is an uncommon case.) 7921 * 7922 * r1 holds array length 7923 * r2 holds class ref CCCC 7924 */ 7925.LOP_NEW_ARRAY_resolve: 7926 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7927 mov r9, r1 @ r9<- length (save) 7928 mov r1, r2 @ r1<- CCCC 7929 mov r2, #0 @ r2<- false 7930 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7931 bl dvmResolveClass @ r0<- call(clazz, ref) 7932 cmp r0, #0 @ got null? 7933 mov r1, r9 @ r1<- length (restore) 7934 beq common_exceptionThrown @ yes, handle exception 7935 @ fall through to OP_NEW_ARRAY_finish 7936 7937 /* 7938 * Finish allocation. 7939 * 7940 * r0 holds class 7941 * r1 holds array length 7942 */ 7943.LOP_NEW_ARRAY_finish: 7944 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 7945 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 7946 cmp r0, #0 @ failed? 7947 mov r2, rINST, lsr #8 @ r2<- A+ 7948 beq common_exceptionThrown @ yes, handle the exception 7949 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7950 and r2, r2, #15 @ r2<- A 7951 GET_INST_OPCODE(ip) @ extract opcode from rINST 7952 SET_VREG(r0, r2) @ vA<- r0 7953 GOTO_OPCODE(ip) @ jump to next instruction 7954 7955/* continuation for OP_FILLED_NEW_ARRAY */ 7956 7957 /* 7958 * On entry: 7959 * r0 holds array class 7960 * r10 holds AA or BA 7961 */ 7962.LOP_FILLED_NEW_ARRAY_continue: 7963 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 7964 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 7965 ldrb rINST, [r3, #1] @ rINST<- descriptor[1] 7966 .if 0 7967 mov r1, r10 @ r1<- AA (length) 7968 .else 7969 mov r1, r10, lsr #4 @ r1<- B (length) 7970 .endif 7971 cmp rINST, #'I' @ array of ints? 7972 cmpne rINST, #'L' @ array of objects? 7973 cmpne rINST, #'[' @ array of arrays? 7974 mov r9, r1 @ save length in r9 7975 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 7976 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 7977 cmp r0, #0 @ null return? 7978 beq common_exceptionThrown @ alloc failed, handle exception 7979 7980 FETCH(r1, 2) @ r1<- FEDC or CCCC 7981 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 7982 str rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type 7983 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 7984 subs r9, r9, #1 @ length--, check for neg 7985 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7986 bmi 2f @ was zero, bail 7987 7988 @ copy values from registers into the array 7989 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 7990 .if 0 7991 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 79921: ldr r3, [r2], #4 @ r3<- *r2++ 7993 subs r9, r9, #1 @ count-- 7994 str r3, [r0], #4 @ *contents++ = vX 7995 bpl 1b 7996 @ continue at 2 7997 .else 7998 cmp r9, #4 @ length was initially 5? 7999 and r2, r10, #15 @ r2<- A 8000 bne 1f @ <= 4 args, branch 8001 GET_VREG(r3, r2) @ r3<- vA 8002 sub r9, r9, #1 @ count-- 8003 str r3, [r0, #16] @ contents[4] = vA 80041: and r2, r1, #15 @ r2<- F/E/D/C 8005 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8006 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8007 subs r9, r9, #1 @ count-- 8008 str r3, [r0], #4 @ *contents++ = vX 8009 bpl 1b 8010 @ continue at 2 8011 .endif 8012 80132: 8014 ldr r0, [rGLUE, #offGlue_retval] @ r0<- object 8015 ldr r1, [rGLUE, #offGlue_retval+4] @ r1<- type 8016 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8017 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8018 cmp r1, #'I' @ Is int array? 8019 strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head 8020 GOTO_OPCODE(ip) @ execute it 8021 8022 /* 8023 * Throw an exception indicating that we have not implemented this 8024 * mode of filled-new-array. 8025 */ 8026.LOP_FILLED_NEW_ARRAY_notimpl: 8027 ldr r0, .L_strInternalError 8028 ldr r1, .L_strFilledNewArrayNotImpl 8029 bl dvmThrowException 8030 b common_exceptionThrown 8031 8032 .if (!0) @ define in one or the other, not both 8033.L_strFilledNewArrayNotImpl: 8034 .word .LstrFilledNewArrayNotImpl 8035.L_strInternalError: 8036 .word .LstrInternalError 8037 .endif 8038 8039/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8040 8041 /* 8042 * On entry: 8043 * r0 holds array class 8044 * r10 holds AA or BA 8045 */ 8046.LOP_FILLED_NEW_ARRAY_RANGE_continue: 8047 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8048 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8049 ldrb rINST, [r3, #1] @ rINST<- descriptor[1] 8050 .if 1 8051 mov r1, r10 @ r1<- AA (length) 8052 .else 8053 mov r1, r10, lsr #4 @ r1<- B (length) 8054 .endif 8055 cmp rINST, #'I' @ array of ints? 8056 cmpne rINST, #'L' @ array of objects? 8057 cmpne rINST, #'[' @ array of arrays? 8058 mov r9, r1 @ save length in r9 8059 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8060 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8061 cmp r0, #0 @ null return? 8062 beq common_exceptionThrown @ alloc failed, handle exception 8063 8064 FETCH(r1, 2) @ r1<- FEDC or CCCC 8065 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8066 str rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type 8067 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8068 subs r9, r9, #1 @ length--, check for neg 8069 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8070 bmi 2f @ was zero, bail 8071 8072 @ copy values from registers into the array 8073 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8074 .if 1 8075 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 80761: ldr r3, [r2], #4 @ r3<- *r2++ 8077 subs r9, r9, #1 @ count-- 8078 str r3, [r0], #4 @ *contents++ = vX 8079 bpl 1b 8080 @ continue at 2 8081 .else 8082 cmp r9, #4 @ length was initially 5? 8083 and r2, r10, #15 @ r2<- A 8084 bne 1f @ <= 4 args, branch 8085 GET_VREG(r3, r2) @ r3<- vA 8086 sub r9, r9, #1 @ count-- 8087 str r3, [r0, #16] @ contents[4] = vA 80881: and r2, r1, #15 @ r2<- F/E/D/C 8089 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8090 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8091 subs r9, r9, #1 @ count-- 8092 str r3, [r0], #4 @ *contents++ = vX 8093 bpl 1b 8094 @ continue at 2 8095 .endif 8096 80972: 8098 ldr r0, [rGLUE, #offGlue_retval] @ r0<- object 8099 ldr r1, [rGLUE, #offGlue_retval+4] @ r1<- type 8100 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8101 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8102 cmp r1, #'I' @ Is int array? 8103 strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head 8104 GOTO_OPCODE(ip) @ execute it 8105 8106 /* 8107 * Throw an exception indicating that we have not implemented this 8108 * mode of filled-new-array. 8109 */ 8110.LOP_FILLED_NEW_ARRAY_RANGE_notimpl: 8111 ldr r0, .L_strInternalError 8112 ldr r1, .L_strFilledNewArrayNotImpl 8113 bl dvmThrowException 8114 b common_exceptionThrown 8115 8116 .if (!1) @ define in one or the other, not both 8117.L_strFilledNewArrayNotImpl: 8118 .word .LstrFilledNewArrayNotImpl 8119.L_strInternalError: 8120 .word .LstrInternalError 8121 .endif 8122 8123/* continuation for OP_CMPL_FLOAT */ 8124.LOP_CMPL_FLOAT_finish: 8125 SET_VREG(r0, r9) @ vAA<- r0 8126 GOTO_OPCODE(ip) @ jump to next instruction 8127 8128/* continuation for OP_CMPG_FLOAT */ 8129.LOP_CMPG_FLOAT_finish: 8130 SET_VREG(r0, r9) @ vAA<- r0 8131 GOTO_OPCODE(ip) @ jump to next instruction 8132 8133/* continuation for OP_CMPL_DOUBLE */ 8134.LOP_CMPL_DOUBLE_finish: 8135 SET_VREG(r0, r9) @ vAA<- r0 8136 GOTO_OPCODE(ip) @ jump to next instruction 8137 8138/* continuation for OP_CMPG_DOUBLE */ 8139.LOP_CMPG_DOUBLE_finish: 8140 SET_VREG(r0, r9) @ vAA<- r0 8141 GOTO_OPCODE(ip) @ jump to next instruction 8142 8143/* continuation for OP_CMP_LONG */ 8144 8145.LOP_CMP_LONG_less: 8146 mvn r1, #0 @ r1<- -1 8147 @ Want to cond code the next mov so we can avoid branch, but don't see it; 8148 @ instead, we just replicate the tail end. 8149 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8150 SET_VREG(r1, r9) @ vAA<- r1 8151 GET_INST_OPCODE(ip) @ extract opcode from rINST 8152 GOTO_OPCODE(ip) @ jump to next instruction 8153 8154.LOP_CMP_LONG_greater: 8155 mov r1, #1 @ r1<- 1 8156 @ fall through to _finish 8157 8158.LOP_CMP_LONG_finish: 8159 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8160 SET_VREG(r1, r9) @ vAA<- r1 8161 GET_INST_OPCODE(ip) @ extract opcode from rINST 8162 GOTO_OPCODE(ip) @ jump to next instruction 8163 8164/* continuation for OP_AGET_WIDE */ 8165 8166.LOP_AGET_WIDE_finish: 8167 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8168 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8169 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8170 GET_INST_OPCODE(ip) @ extract opcode from rINST 8171 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8172 GOTO_OPCODE(ip) @ jump to next instruction 8173 8174/* continuation for OP_APUT_WIDE */ 8175 8176.LOP_APUT_WIDE_finish: 8177 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8178 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8179 GET_INST_OPCODE(ip) @ extract opcode from rINST 8180 strd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8181 GOTO_OPCODE(ip) @ jump to next instruction 8182 8183/* continuation for OP_APUT_OBJECT */ 8184 /* 8185 * On entry: 8186 * rINST = vBB (arrayObj) 8187 * r9 = vAA (obj) 8188 * r10 = offset into array (vBB + vCC * width) 8189 */ 8190.LOP_APUT_OBJECT_finish: 8191 cmp r9, #0 @ storing null reference? 8192 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8193 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8194 ldr r1, [rINST, #offObject_clazz] @ r1<- arrayObj->clazz 8195 bl dvmCanPutArrayElement @ test object type vs. array type 8196 cmp r0, #0 @ okay? 8197 beq .LOP_APUT_OBJECT_throw @ no 8198 mov r1, rINST @ r1<- arrayObj 8199 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8200 ldr r2, [rGLUE, #offGlue_cardTable] @ get biased CT base 8201 add r10, #offArrayObject_contents @ r0<- pointer to slot 8202 GET_INST_OPCODE(ip) @ extract opcode from rINST 8203 str r9, [r10] @ vBB[vCC]<- vAA 8204 strb r2, [r2, r1, lsr #GC_CARD_SHIFT] @ mark card using object head 8205 GOTO_OPCODE(ip) @ jump to next instruction 8206.LOP_APUT_OBJECT_skip_check: 8207 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8208 GET_INST_OPCODE(ip) @ extract opcode from rINST 8209 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8210 GOTO_OPCODE(ip) @ jump to next instruction 8211.LOP_APUT_OBJECT_throw: 8212 @ The types don't match. We need to throw an ArrayStoreException. 8213 ldr r0, [r9, #offObject_clazz] 8214 ldr r1, [rINST, #offObject_clazz] 8215 EXPORT_PC() 8216 bl dvmThrowArrayStoreException 8217 b common_exceptionThrown 8218 8219/* continuation for OP_IGET */ 8220 8221 /* 8222 * Currently: 8223 * r0 holds resolved field 8224 * r9 holds object 8225 */ 8226.LOP_IGET_finish: 8227 @bl common_squeak0 8228 cmp r9, #0 @ check object for null 8229 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8230 beq common_errNullObject @ object was null 8231 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8232 ubfx r2, rINST, #8, #4 @ r2<- A 8233 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8234 GET_INST_OPCODE(ip) @ extract opcode from rINST 8235 SET_VREG(r0, r2) @ fp[A]<- r0 8236 GOTO_OPCODE(ip) @ jump to next instruction 8237 8238/* continuation for OP_IGET_WIDE */ 8239 8240 /* 8241 * Currently: 8242 * r0 holds resolved field 8243 * r9 holds object 8244 */ 8245.LOP_IGET_WIDE_finish: 8246 cmp r9, #0 @ check object for null 8247 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8248 beq common_errNullObject @ object was null 8249 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 8250 ubfx r2, rINST, #8, #4 @ r2<- A 8251 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8252 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8253 GET_INST_OPCODE(ip) @ extract opcode from rINST 8254 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8255 GOTO_OPCODE(ip) @ jump to next instruction 8256 8257/* continuation for OP_IGET_OBJECT */ 8258 8259 /* 8260 * Currently: 8261 * r0 holds resolved field 8262 * r9 holds object 8263 */ 8264.LOP_IGET_OBJECT_finish: 8265 @bl common_squeak0 8266 cmp r9, #0 @ check object for null 8267 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8268 beq common_errNullObject @ object was null 8269 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8270 @ no-op @ acquiring load 8271 mov r2, rINST, lsr #8 @ r2<- A+ 8272 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8273 and r2, r2, #15 @ r2<- A 8274 GET_INST_OPCODE(ip) @ extract opcode from rINST 8275 SET_VREG(r0, r2) @ fp[A]<- r0 8276 GOTO_OPCODE(ip) @ jump to next instruction 8277 8278/* continuation for OP_IGET_BOOLEAN */ 8279 8280 /* 8281 * Currently: 8282 * r0 holds resolved field 8283 * r9 holds object 8284 */ 8285.LOP_IGET_BOOLEAN_finish: 8286 @bl common_squeak1 8287 cmp r9, #0 @ check object for null 8288 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8289 beq common_errNullObject @ object was null 8290 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8291 @ no-op @ acquiring load 8292 mov r2, rINST, lsr #8 @ r2<- A+ 8293 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8294 and r2, r2, #15 @ r2<- A 8295 GET_INST_OPCODE(ip) @ extract opcode from rINST 8296 SET_VREG(r0, r2) @ fp[A]<- r0 8297 GOTO_OPCODE(ip) @ jump to next instruction 8298 8299/* continuation for OP_IGET_BYTE */ 8300 8301 /* 8302 * Currently: 8303 * r0 holds resolved field 8304 * r9 holds object 8305 */ 8306.LOP_IGET_BYTE_finish: 8307 @bl common_squeak2 8308 cmp r9, #0 @ check object for null 8309 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8310 beq common_errNullObject @ object was null 8311 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8312 @ no-op @ acquiring load 8313 mov r2, rINST, lsr #8 @ r2<- A+ 8314 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8315 and r2, r2, #15 @ r2<- A 8316 GET_INST_OPCODE(ip) @ extract opcode from rINST 8317 SET_VREG(r0, r2) @ fp[A]<- r0 8318 GOTO_OPCODE(ip) @ jump to next instruction 8319 8320/* continuation for OP_IGET_CHAR */ 8321 8322 /* 8323 * Currently: 8324 * r0 holds resolved field 8325 * r9 holds object 8326 */ 8327.LOP_IGET_CHAR_finish: 8328 @bl common_squeak3 8329 cmp r9, #0 @ check object for null 8330 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8331 beq common_errNullObject @ object was null 8332 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8333 @ no-op @ acquiring load 8334 mov r2, rINST, lsr #8 @ r2<- A+ 8335 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8336 and r2, r2, #15 @ r2<- A 8337 GET_INST_OPCODE(ip) @ extract opcode from rINST 8338 SET_VREG(r0, r2) @ fp[A]<- r0 8339 GOTO_OPCODE(ip) @ jump to next instruction 8340 8341/* continuation for OP_IGET_SHORT */ 8342 8343 /* 8344 * Currently: 8345 * r0 holds resolved field 8346 * r9 holds object 8347 */ 8348.LOP_IGET_SHORT_finish: 8349 @bl common_squeak4 8350 cmp r9, #0 @ check object for null 8351 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8352 beq common_errNullObject @ object was null 8353 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8354 @ no-op @ acquiring load 8355 mov r2, rINST, lsr #8 @ r2<- A+ 8356 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8357 and r2, r2, #15 @ r2<- A 8358 GET_INST_OPCODE(ip) @ extract opcode from rINST 8359 SET_VREG(r0, r2) @ fp[A]<- r0 8360 GOTO_OPCODE(ip) @ jump to next instruction 8361 8362/* continuation for OP_IPUT */ 8363 8364 /* 8365 * Currently: 8366 * r0 holds resolved field 8367 * r9 holds object 8368 */ 8369.LOP_IPUT_finish: 8370 @bl common_squeak0 8371 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8372 ubfx r1, rINST, #8, #4 @ r1<- A 8373 cmp r9, #0 @ check object for null 8374 GET_VREG(r0, r1) @ r0<- fp[A] 8375 beq common_errNullObject @ object was null 8376 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8377 GET_INST_OPCODE(ip) @ extract opcode from rINST 8378 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8379 GOTO_OPCODE(ip) @ jump to next instruction 8380 8381/* continuation for OP_IPUT_WIDE */ 8382 8383 /* 8384 * Currently: 8385 * r0 holds resolved field 8386 * r9 holds object 8387 */ 8388.LOP_IPUT_WIDE_finish: 8389 ubfx r2, rINST, #8, #4 @ r2<- A 8390 cmp r9, #0 @ check object for null 8391 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8392 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8393 beq common_errNullObject @ object was null 8394 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8395 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8396 GET_INST_OPCODE(ip) @ extract opcode from rINST 8397 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0 8398 GOTO_OPCODE(ip) @ jump to next instruction 8399 8400/* continuation for OP_IPUT_OBJECT */ 8401 8402 /* 8403 * Currently: 8404 * r0 holds resolved field 8405 * r9 holds object 8406 */ 8407.LOP_IPUT_OBJECT_finish: 8408 @bl common_squeak0 8409 mov r1, rINST, lsr #8 @ r1<- A+ 8410 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8411 and r1, r1, #15 @ r1<- A 8412 cmp r9, #0 @ check object for null 8413 GET_VREG(r0, r1) @ r0<- fp[A] 8414 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8415 beq common_errNullObject @ object was null 8416 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8417 GET_INST_OPCODE(ip) @ extract opcode from rINST 8418 @ no-op @ releasing store 8419 str r0, [r9, r3] @ obj.field (32 bits)<- r0 8420 cmp r0, #0 @ stored a null reference? 8421 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not 8422 GOTO_OPCODE(ip) @ jump to next instruction 8423 8424/* continuation for OP_IPUT_BOOLEAN */ 8425 8426 /* 8427 * Currently: 8428 * r0 holds resolved field 8429 * r9 holds object 8430 */ 8431.LOP_IPUT_BOOLEAN_finish: 8432 @bl common_squeak1 8433 mov r1, rINST, lsr #8 @ r1<- A+ 8434 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8435 and r1, r1, #15 @ r1<- A 8436 cmp r9, #0 @ check object for null 8437 GET_VREG(r0, r1) @ r0<- fp[A] 8438 beq common_errNullObject @ object was null 8439 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8440 GET_INST_OPCODE(ip) @ extract opcode from rINST 8441 @ no-op @ releasing store 8442 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8443 GOTO_OPCODE(ip) @ jump to next instruction 8444 8445/* continuation for OP_IPUT_BYTE */ 8446 8447 /* 8448 * Currently: 8449 * r0 holds resolved field 8450 * r9 holds object 8451 */ 8452.LOP_IPUT_BYTE_finish: 8453 @bl common_squeak2 8454 mov r1, rINST, lsr #8 @ r1<- A+ 8455 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8456 and r1, r1, #15 @ r1<- A 8457 cmp r9, #0 @ check object for null 8458 GET_VREG(r0, r1) @ r0<- fp[A] 8459 beq common_errNullObject @ object was null 8460 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8461 GET_INST_OPCODE(ip) @ extract opcode from rINST 8462 @ no-op @ releasing store 8463 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8464 GOTO_OPCODE(ip) @ jump to next instruction 8465 8466/* continuation for OP_IPUT_CHAR */ 8467 8468 /* 8469 * Currently: 8470 * r0 holds resolved field 8471 * r9 holds object 8472 */ 8473.LOP_IPUT_CHAR_finish: 8474 @bl common_squeak3 8475 mov r1, rINST, lsr #8 @ r1<- A+ 8476 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8477 and r1, r1, #15 @ r1<- A 8478 cmp r9, #0 @ check object for null 8479 GET_VREG(r0, r1) @ r0<- fp[A] 8480 beq common_errNullObject @ object was null 8481 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8482 GET_INST_OPCODE(ip) @ extract opcode from rINST 8483 @ no-op @ releasing store 8484 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8485 GOTO_OPCODE(ip) @ jump to next instruction 8486 8487/* continuation for OP_IPUT_SHORT */ 8488 8489 /* 8490 * Currently: 8491 * r0 holds resolved field 8492 * r9 holds object 8493 */ 8494.LOP_IPUT_SHORT_finish: 8495 @bl common_squeak4 8496 mov r1, rINST, lsr #8 @ r1<- A+ 8497 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8498 and r1, r1, #15 @ r1<- A 8499 cmp r9, #0 @ check object for null 8500 GET_VREG(r0, r1) @ r0<- fp[A] 8501 beq common_errNullObject @ object was null 8502 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8503 GET_INST_OPCODE(ip) @ extract opcode from rINST 8504 @ no-op @ releasing store 8505 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8506 GOTO_OPCODE(ip) @ jump to next instruction 8507 8508/* continuation for OP_SGET */ 8509 8510 /* 8511 * Continuation if the field has not yet been resolved. 8512 * r1: BBBB field ref 8513 */ 8514.LOP_SGET_resolve: 8515 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8516 EXPORT_PC() @ resolve() could throw, so export now 8517 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8518 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8519 cmp r0, #0 @ success? 8520 bne .LOP_SGET_finish @ yes, finish 8521 b common_exceptionThrown @ no, handle exception 8522 8523/* continuation for OP_SGET_WIDE */ 8524 8525 /* 8526 * Continuation if the field has not yet been resolved. 8527 * r1: BBBB field ref 8528 * 8529 * Returns StaticField pointer in r0. 8530 */ 8531.LOP_SGET_WIDE_resolve: 8532 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8533 EXPORT_PC() @ resolve() could throw, so export now 8534 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8535 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8536 cmp r0, #0 @ success? 8537 bne .LOP_SGET_WIDE_finish @ yes, finish 8538 b common_exceptionThrown @ no, handle exception 8539 8540/* continuation for OP_SGET_OBJECT */ 8541 8542 /* 8543 * Continuation if the field has not yet been resolved. 8544 * r1: BBBB field ref 8545 */ 8546.LOP_SGET_OBJECT_resolve: 8547 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8548 EXPORT_PC() @ resolve() could throw, so export now 8549 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8550 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8551 cmp r0, #0 @ success? 8552 bne .LOP_SGET_OBJECT_finish @ yes, finish 8553 b common_exceptionThrown @ no, handle exception 8554 8555/* continuation for OP_SGET_BOOLEAN */ 8556 8557 /* 8558 * Continuation if the field has not yet been resolved. 8559 * r1: BBBB field ref 8560 */ 8561.LOP_SGET_BOOLEAN_resolve: 8562 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8563 EXPORT_PC() @ resolve() could throw, so export now 8564 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8565 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8566 cmp r0, #0 @ success? 8567 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 8568 b common_exceptionThrown @ no, handle exception 8569 8570/* continuation for OP_SGET_BYTE */ 8571 8572 /* 8573 * Continuation if the field has not yet been resolved. 8574 * r1: BBBB field ref 8575 */ 8576.LOP_SGET_BYTE_resolve: 8577 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8578 EXPORT_PC() @ resolve() could throw, so export now 8579 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8580 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8581 cmp r0, #0 @ success? 8582 bne .LOP_SGET_BYTE_finish @ yes, finish 8583 b common_exceptionThrown @ no, handle exception 8584 8585/* continuation for OP_SGET_CHAR */ 8586 8587 /* 8588 * Continuation if the field has not yet been resolved. 8589 * r1: BBBB field ref 8590 */ 8591.LOP_SGET_CHAR_resolve: 8592 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8593 EXPORT_PC() @ resolve() could throw, so export now 8594 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8595 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8596 cmp r0, #0 @ success? 8597 bne .LOP_SGET_CHAR_finish @ yes, finish 8598 b common_exceptionThrown @ no, handle exception 8599 8600/* continuation for OP_SGET_SHORT */ 8601 8602 /* 8603 * Continuation if the field has not yet been resolved. 8604 * r1: BBBB field ref 8605 */ 8606.LOP_SGET_SHORT_resolve: 8607 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8608 EXPORT_PC() @ resolve() could throw, so export now 8609 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8610 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8611 cmp r0, #0 @ success? 8612 bne .LOP_SGET_SHORT_finish @ yes, finish 8613 b common_exceptionThrown @ no, handle exception 8614 8615/* continuation for OP_SPUT */ 8616 8617 /* 8618 * Continuation if the field has not yet been resolved. 8619 * r1: BBBB field ref 8620 */ 8621.LOP_SPUT_resolve: 8622 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8623 EXPORT_PC() @ resolve() could throw, so export now 8624 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8625 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8626 cmp r0, #0 @ success? 8627 bne .LOP_SPUT_finish @ yes, finish 8628 b common_exceptionThrown @ no, handle exception 8629 8630/* continuation for OP_SPUT_WIDE */ 8631 8632 /* 8633 * Continuation if the field has not yet been resolved. 8634 * r1: BBBB field ref 8635 * r9: &fp[AA] 8636 * 8637 * Returns StaticField pointer in r2. 8638 */ 8639.LOP_SPUT_WIDE_resolve: 8640 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8641 EXPORT_PC() @ resolve() could throw, so export now 8642 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8643 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8644 cmp r0, #0 @ success? 8645 mov r2, r0 @ copy to r2 8646 bne .LOP_SPUT_WIDE_finish @ yes, finish 8647 b common_exceptionThrown @ no, handle exception 8648 8649/* continuation for OP_SPUT_OBJECT */ 8650.LOP_SPUT_OBJECT_finish: @ field ptr in r0 8651 mov r2, rINST, lsr #8 @ r2<- AA 8652 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8653 GET_VREG(r1, r2) @ r1<- fp[AA] 8654 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8655 ldr r9, [r0, #offField_clazz] @ r9<- field->clazz 8656 GET_INST_OPCODE(ip) @ extract opcode from rINST 8657 @ no-op @ releasing store 8658 str r1, [r0, #offStaticField_value] @ field<- vAA 8659 cmp r1, #0 @ stored a null object? 8660 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card based on obj head 8661 GOTO_OPCODE(ip) @ jump to next instruction 8662 8663/* continuation for OP_SPUT_BOOLEAN */ 8664 8665 /* 8666 * Continuation if the field has not yet been resolved. 8667 * r1: BBBB field ref 8668 */ 8669.LOP_SPUT_BOOLEAN_resolve: 8670 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8671 EXPORT_PC() @ resolve() could throw, so export now 8672 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8673 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8674 cmp r0, #0 @ success? 8675 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 8676 b common_exceptionThrown @ no, handle exception 8677 8678/* continuation for OP_SPUT_BYTE */ 8679 8680 /* 8681 * Continuation if the field has not yet been resolved. 8682 * r1: BBBB field ref 8683 */ 8684.LOP_SPUT_BYTE_resolve: 8685 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8686 EXPORT_PC() @ resolve() could throw, so export now 8687 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8688 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8689 cmp r0, #0 @ success? 8690 bne .LOP_SPUT_BYTE_finish @ yes, finish 8691 b common_exceptionThrown @ no, handle exception 8692 8693/* continuation for OP_SPUT_CHAR */ 8694 8695 /* 8696 * Continuation if the field has not yet been resolved. 8697 * r1: BBBB field ref 8698 */ 8699.LOP_SPUT_CHAR_resolve: 8700 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8701 EXPORT_PC() @ resolve() could throw, so export now 8702 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8703 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8704 cmp r0, #0 @ success? 8705 bne .LOP_SPUT_CHAR_finish @ yes, finish 8706 b common_exceptionThrown @ no, handle exception 8707 8708/* continuation for OP_SPUT_SHORT */ 8709 8710 /* 8711 * Continuation if the field has not yet been resolved. 8712 * r1: BBBB field ref 8713 */ 8714.LOP_SPUT_SHORT_resolve: 8715 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8716 EXPORT_PC() @ resolve() could throw, so export now 8717 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8718 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8719 cmp r0, #0 @ success? 8720 bne .LOP_SPUT_SHORT_finish @ yes, finish 8721 b common_exceptionThrown @ no, handle exception 8722 8723/* continuation for OP_INVOKE_VIRTUAL */ 8724 8725 /* 8726 * At this point: 8727 * r0 = resolved base method 8728 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 8729 */ 8730.LOP_INVOKE_VIRTUAL_continue: 8731 GET_VREG(r1, r10) @ r1<- "this" ptr 8732 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8733 cmp r1, #0 @ is "this" null? 8734 beq common_errNullObject @ null "this", throw exception 8735 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 8736 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 8737 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 8738 bl common_invokeMethodNoRange @ continue on 8739 8740/* continuation for OP_INVOKE_SUPER */ 8741 8742 /* 8743 * At this point: 8744 * r0 = resolved base method 8745 * r9 = method->clazz 8746 */ 8747.LOP_INVOKE_SUPER_continue: 8748 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 8749 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8750 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 8751 EXPORT_PC() @ must export for invoke 8752 cmp r2, r3 @ compare (methodIndex, vtableCount) 8753 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass 8754 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 8755 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 8756 bl common_invokeMethodNoRange @ continue on 8757 8758.LOP_INVOKE_SUPER_resolve: 8759 mov r0, r9 @ r0<- method->clazz 8760 mov r2, #METHOD_VIRTUAL @ resolver method type 8761 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8762 cmp r0, #0 @ got null? 8763 bne .LOP_INVOKE_SUPER_continue @ no, continue 8764 b common_exceptionThrown @ yes, handle exception 8765 8766 /* 8767 * Throw a NoSuchMethodError with the method name as the message. 8768 * r0 = resolved base method 8769 */ 8770.LOP_INVOKE_SUPER_nsm: 8771 ldr r1, [r0, #offMethod_name] @ r1<- method name 8772 b common_errNoSuchMethod 8773 8774/* continuation for OP_INVOKE_DIRECT */ 8775 8776 /* 8777 * On entry: 8778 * r1 = reference (BBBB or CCCC) 8779 * r10 = "this" register 8780 */ 8781.LOP_INVOKE_DIRECT_resolve: 8782 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8783 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8784 mov r2, #METHOD_DIRECT @ resolver method type 8785 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8786 cmp r0, #0 @ got null? 8787 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 8788 bne .LOP_INVOKE_DIRECT_finish @ no, continue 8789 b common_exceptionThrown @ yes, handle exception 8790 8791/* continuation for OP_INVOKE_VIRTUAL_RANGE */ 8792 8793 /* 8794 * At this point: 8795 * r0 = resolved base method 8796 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 8797 */ 8798.LOP_INVOKE_VIRTUAL_RANGE_continue: 8799 GET_VREG(r1, r10) @ r1<- "this" ptr 8800 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8801 cmp r1, #0 @ is "this" null? 8802 beq common_errNullObject @ null "this", throw exception 8803 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 8804 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 8805 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 8806 bl common_invokeMethodRange @ continue on 8807 8808/* continuation for OP_INVOKE_SUPER_RANGE */ 8809 8810 /* 8811 * At this point: 8812 * r0 = resolved base method 8813 * r9 = method->clazz 8814 */ 8815.LOP_INVOKE_SUPER_RANGE_continue: 8816 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 8817 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8818 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 8819 EXPORT_PC() @ must export for invoke 8820 cmp r2, r3 @ compare (methodIndex, vtableCount) 8821 bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass 8822 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 8823 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 8824 bl common_invokeMethodRange @ continue on 8825 8826.LOP_INVOKE_SUPER_RANGE_resolve: 8827 mov r0, r9 @ r0<- method->clazz 8828 mov r2, #METHOD_VIRTUAL @ resolver method type 8829 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8830 cmp r0, #0 @ got null? 8831 bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue 8832 b common_exceptionThrown @ yes, handle exception 8833 8834 /* 8835 * Throw a NoSuchMethodError with the method name as the message. 8836 * r0 = resolved base method 8837 */ 8838.LOP_INVOKE_SUPER_RANGE_nsm: 8839 ldr r1, [r0, #offMethod_name] @ r1<- method name 8840 b common_errNoSuchMethod 8841 8842/* continuation for OP_INVOKE_DIRECT_RANGE */ 8843 8844 /* 8845 * On entry: 8846 * r1 = reference (BBBB or CCCC) 8847 * r10 = "this" register 8848 */ 8849.LOP_INVOKE_DIRECT_RANGE_resolve: 8850 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8851 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8852 mov r2, #METHOD_DIRECT @ resolver method type 8853 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8854 cmp r0, #0 @ got null? 8855 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 8856 bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue 8857 b common_exceptionThrown @ yes, handle exception 8858 8859/* continuation for OP_FLOAT_TO_LONG */ 8860/* 8861 * Convert the float in r0 to a long in r0/r1. 8862 * 8863 * We have to clip values to long min/max per the specification. The 8864 * expected common case is a "reasonable" value that converts directly 8865 * to modest integer. The EABI convert function isn't doing this for us. 8866 */ 8867f2l_doconv: 8868 stmfd sp!, {r4, lr} 8869 mov r1, #0x5f000000 @ (float)maxlong 8870 mov r4, r0 8871 bl __aeabi_fcmpge @ is arg >= maxlong? 8872 cmp r0, #0 @ nonzero == yes 8873 mvnne r0, #0 @ return maxlong (7fffffff) 8874 mvnne r1, #0x80000000 8875 ldmnefd sp!, {r4, pc} 8876 8877 mov r0, r4 @ recover arg 8878 mov r1, #0xdf000000 @ (float)minlong 8879 bl __aeabi_fcmple @ is arg <= minlong? 8880 cmp r0, #0 @ nonzero == yes 8881 movne r0, #0 @ return minlong (80000000) 8882 movne r1, #0x80000000 8883 ldmnefd sp!, {r4, pc} 8884 8885 mov r0, r4 @ recover arg 8886 mov r1, r4 8887 bl __aeabi_fcmpeq @ is arg == self? 8888 cmp r0, #0 @ zero == no 8889 moveq r1, #0 @ return zero for NaN 8890 ldmeqfd sp!, {r4, pc} 8891 8892 mov r0, r4 @ recover arg 8893 bl __aeabi_f2lz @ convert float to long 8894 ldmfd sp!, {r4, pc} 8895 8896/* continuation for OP_DOUBLE_TO_LONG */ 8897/* 8898 * Convert the double in r0/r1 to a long in r0/r1. 8899 * 8900 * We have to clip values to long min/max per the specification. The 8901 * expected common case is a "reasonable" value that converts directly 8902 * to modest integer. The EABI convert function isn't doing this for us. 8903 */ 8904d2l_doconv: 8905 stmfd sp!, {r4, r5, lr} @ save regs 8906 mov r3, #0x43000000 @ maxlong, as a double (high word) 8907 add r3, #0x00e00000 @ 0x43e00000 8908 mov r2, #0 @ maxlong, as a double (low word) 8909 sub sp, sp, #4 @ align for EABI 8910 mov r4, r0 @ save a copy of r0 8911 mov r5, r1 @ and r1 8912 bl __aeabi_dcmpge @ is arg >= maxlong? 8913 cmp r0, #0 @ nonzero == yes 8914 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 8915 mvnne r1, #0x80000000 8916 bne 1f 8917 8918 mov r0, r4 @ recover arg 8919 mov r1, r5 8920 mov r3, #0xc3000000 @ minlong, as a double (high word) 8921 add r3, #0x00e00000 @ 0xc3e00000 8922 mov r2, #0 @ minlong, as a double (low word) 8923 bl __aeabi_dcmple @ is arg <= minlong? 8924 cmp r0, #0 @ nonzero == yes 8925 movne r0, #0 @ return minlong (8000000000000000) 8926 movne r1, #0x80000000 8927 bne 1f 8928 8929 mov r0, r4 @ recover arg 8930 mov r1, r5 8931 mov r2, r4 @ compare against self 8932 mov r3, r5 8933 bl __aeabi_dcmpeq @ is arg == self? 8934 cmp r0, #0 @ zero == no 8935 moveq r1, #0 @ return zero for NaN 8936 beq 1f 8937 8938 mov r0, r4 @ recover arg 8939 mov r1, r5 8940 bl __aeabi_d2lz @ convert double to long 8941 89421: 8943 add sp, sp, #4 8944 ldmfd sp!, {r4, r5, pc} 8945 8946/* continuation for OP_MUL_LONG */ 8947 8948.LOP_MUL_LONG_finish: 8949 GET_INST_OPCODE(ip) @ extract opcode from rINST 8950 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 8951 GOTO_OPCODE(ip) @ jump to next instruction 8952 8953/* continuation for OP_SHL_LONG */ 8954 8955.LOP_SHL_LONG_finish: 8956 mov r0, r0, asl r2 @ r0<- r0 << r2 8957 GET_INST_OPCODE(ip) @ extract opcode from rINST 8958 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8959 GOTO_OPCODE(ip) @ jump to next instruction 8960 8961/* continuation for OP_SHR_LONG */ 8962 8963.LOP_SHR_LONG_finish: 8964 mov r1, r1, asr r2 @ r1<- r1 >> r2 8965 GET_INST_OPCODE(ip) @ extract opcode from rINST 8966 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8967 GOTO_OPCODE(ip) @ jump to next instruction 8968 8969/* continuation for OP_USHR_LONG */ 8970 8971.LOP_USHR_LONG_finish: 8972 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 8973 GET_INST_OPCODE(ip) @ extract opcode from rINST 8974 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8975 GOTO_OPCODE(ip) @ jump to next instruction 8976 8977/* continuation for OP_SHL_LONG_2ADDR */ 8978 8979.LOP_SHL_LONG_2ADDR_finish: 8980 GET_INST_OPCODE(ip) @ extract opcode from rINST 8981 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8982 GOTO_OPCODE(ip) @ jump to next instruction 8983 8984/* continuation for OP_SHR_LONG_2ADDR */ 8985 8986.LOP_SHR_LONG_2ADDR_finish: 8987 GET_INST_OPCODE(ip) @ extract opcode from rINST 8988 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8989 GOTO_OPCODE(ip) @ jump to next instruction 8990 8991/* continuation for OP_USHR_LONG_2ADDR */ 8992 8993.LOP_USHR_LONG_2ADDR_finish: 8994 GET_INST_OPCODE(ip) @ extract opcode from rINST 8995 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8996 GOTO_OPCODE(ip) @ jump to next instruction 8997 8998/* continuation for OP_IGET_VOLATILE */ 8999 9000 /* 9001 * Currently: 9002 * r0 holds resolved field 9003 * r9 holds object 9004 */ 9005.LOP_IGET_VOLATILE_finish: 9006 @bl common_squeak0 9007 cmp r9, #0 @ check object for null 9008 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9009 beq common_errNullObject @ object was null 9010 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 9011 SMP_DMB @ acquiring load 9012 mov r2, rINST, lsr #8 @ r2<- A+ 9013 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9014 and r2, r2, #15 @ r2<- A 9015 GET_INST_OPCODE(ip) @ extract opcode from rINST 9016 SET_VREG(r0, r2) @ fp[A]<- r0 9017 GOTO_OPCODE(ip) @ jump to next instruction 9018 9019/* continuation for OP_IPUT_VOLATILE */ 9020 9021 /* 9022 * Currently: 9023 * r0 holds resolved field 9024 * r9 holds object 9025 */ 9026.LOP_IPUT_VOLATILE_finish: 9027 @bl common_squeak0 9028 mov r1, rINST, lsr #8 @ r1<- A+ 9029 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9030 and r1, r1, #15 @ r1<- A 9031 cmp r9, #0 @ check object for null 9032 GET_VREG(r0, r1) @ r0<- fp[A] 9033 beq common_errNullObject @ object was null 9034 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9035 GET_INST_OPCODE(ip) @ extract opcode from rINST 9036 SMP_DMB @ releasing store 9037 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 9038 GOTO_OPCODE(ip) @ jump to next instruction 9039 9040/* continuation for OP_SGET_VOLATILE */ 9041 9042 /* 9043 * Continuation if the field has not yet been resolved. 9044 * r1: BBBB field ref 9045 */ 9046.LOP_SGET_VOLATILE_resolve: 9047 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9048 EXPORT_PC() @ resolve() could throw, so export now 9049 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9050 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9051 cmp r0, #0 @ success? 9052 bne .LOP_SGET_VOLATILE_finish @ yes, finish 9053 b common_exceptionThrown @ no, handle exception 9054 9055/* continuation for OP_SPUT_VOLATILE */ 9056 9057 /* 9058 * Continuation if the field has not yet been resolved. 9059 * r1: BBBB field ref 9060 */ 9061.LOP_SPUT_VOLATILE_resolve: 9062 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9063 EXPORT_PC() @ resolve() could throw, so export now 9064 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9065 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9066 cmp r0, #0 @ success? 9067 bne .LOP_SPUT_VOLATILE_finish @ yes, finish 9068 b common_exceptionThrown @ no, handle exception 9069 9070/* continuation for OP_IGET_OBJECT_VOLATILE */ 9071 9072 /* 9073 * Currently: 9074 * r0 holds resolved field 9075 * r9 holds object 9076 */ 9077.LOP_IGET_OBJECT_VOLATILE_finish: 9078 @bl common_squeak0 9079 cmp r9, #0 @ check object for null 9080 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9081 beq common_errNullObject @ object was null 9082 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 9083 SMP_DMB @ acquiring load 9084 mov r2, rINST, lsr #8 @ r2<- A+ 9085 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9086 and r2, r2, #15 @ r2<- A 9087 GET_INST_OPCODE(ip) @ extract opcode from rINST 9088 SET_VREG(r0, r2) @ fp[A]<- r0 9089 GOTO_OPCODE(ip) @ jump to next instruction 9090 9091/* continuation for OP_IGET_WIDE_VOLATILE */ 9092 9093 /* 9094 * Currently: 9095 * r0 holds resolved field 9096 * r9 holds object 9097 */ 9098.LOP_IGET_WIDE_VOLATILE_finish: 9099 cmp r9, #0 @ check object for null 9100 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9101 beq common_errNullObject @ object was null 9102 .if 1 9103 add r0, r9, r3 @ r0<- address of field 9104 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 9105 .else 9106 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 9107 .endif 9108 mov r2, rINST, lsr #8 @ r2<- A+ 9109 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9110 and r2, r2, #15 @ r2<- A 9111 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 9112 GET_INST_OPCODE(ip) @ extract opcode from rINST 9113 stmia r3, {r0-r1} @ fp[A]<- r0/r1 9114 GOTO_OPCODE(ip) @ jump to next instruction 9115 9116/* continuation for OP_IPUT_WIDE_VOLATILE */ 9117 9118 /* 9119 * Currently: 9120 * r0 holds resolved field 9121 * r9 holds object 9122 */ 9123.LOP_IPUT_WIDE_VOLATILE_finish: 9124 mov r2, rINST, lsr #8 @ r2<- A+ 9125 cmp r9, #0 @ check object for null 9126 and r2, r2, #15 @ r2<- A 9127 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9128 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 9129 beq common_errNullObject @ object was null 9130 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9131 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 9132 GET_INST_OPCODE(r10) @ extract opcode from rINST 9133 .if 1 9134 add r2, r9, r3 @ r2<- target address 9135 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 9136 .else 9137 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1 9138 .endif 9139 GOTO_OPCODE(r10) @ jump to next instruction 9140 9141/* continuation for OP_SGET_WIDE_VOLATILE */ 9142 9143 /* 9144 * Continuation if the field has not yet been resolved. 9145 * r1: BBBB field ref 9146 * 9147 * Returns StaticField pointer in r0. 9148 */ 9149.LOP_SGET_WIDE_VOLATILE_resolve: 9150 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9151 EXPORT_PC() @ resolve() could throw, so export now 9152 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9153 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9154 cmp r0, #0 @ success? 9155 bne .LOP_SGET_WIDE_VOLATILE_finish @ yes, finish 9156 b common_exceptionThrown @ no, handle exception 9157 9158/* continuation for OP_SPUT_WIDE_VOLATILE */ 9159 9160 /* 9161 * Continuation if the field has not yet been resolved. 9162 * r1: BBBB field ref 9163 * r9: &fp[AA] 9164 * 9165 * Returns StaticField pointer in r2. 9166 */ 9167.LOP_SPUT_WIDE_VOLATILE_resolve: 9168 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9169 EXPORT_PC() @ resolve() could throw, so export now 9170 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9171 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9172 cmp r0, #0 @ success? 9173 mov r2, r0 @ copy to r2 9174 bne .LOP_SPUT_WIDE_VOLATILE_finish @ yes, finish 9175 b common_exceptionThrown @ no, handle exception 9176 9177/* continuation for OP_EXECUTE_INLINE */ 9178 9179 /* 9180 * Extract args, call function. 9181 * r0 = #of args (0-4) 9182 * r10 = call index 9183 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9184 * 9185 * Other ideas: 9186 * - Use a jump table from the main piece to jump directly into the 9187 * AND/LDR pairs. Costs a data load, saves a branch. 9188 * - Have five separate pieces that do the loading, so we can work the 9189 * interleave a little better. Increases code size. 9190 */ 9191.LOP_EXECUTE_INLINE_continue: 9192 rsb r0, r0, #4 @ r0<- 4-r0 9193 FETCH(r9, 2) @ r9<- FEDC 9194 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9195 bl common_abort @ (skipped due to ARM prefetch) 91964: and ip, r9, #0xf000 @ isolate F 9197 ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) 91983: and ip, r9, #0x0f00 @ isolate E 9199 ldr r2, [rFP, ip, lsr #6] @ r2<- vE 92002: and ip, r9, #0x00f0 @ isolate D 9201 ldr r1, [rFP, ip, lsr #2] @ r1<- vD 92021: and ip, r9, #0x000f @ isolate C 9203 ldr r0, [rFP, ip, lsl #2] @ r0<- vC 92040: 9205 ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation 9206 ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry 9207 @ (not reached) 9208 9209.LOP_EXECUTE_INLINE_table: 9210 .word gDvmInlineOpsTable 9211 9212/* continuation for OP_EXECUTE_INLINE_RANGE */ 9213 9214 /* 9215 * Extract args, call function. 9216 * r0 = #of args (0-4) 9217 * r10 = call index 9218 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9219 */ 9220.LOP_EXECUTE_INLINE_RANGE_continue: 9221 rsb r0, r0, #4 @ r0<- 4-r0 9222 FETCH(r9, 2) @ r9<- CCCC 9223 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9224 bl common_abort @ (skipped due to ARM prefetch) 92254: add ip, r9, #3 @ base+3 9226 GET_VREG(r3, ip) @ r3<- vBase[3] 92273: add ip, r9, #2 @ base+2 9228 GET_VREG(r2, ip) @ r2<- vBase[2] 92292: add ip, r9, #1 @ base+1 9230 GET_VREG(r1, ip) @ r1<- vBase[1] 92311: add ip, r9, #0 @ (nop) 9232 GET_VREG(r0, ip) @ r0<- vBase[0] 92330: 9234 ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation 9235 ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry 9236 @ (not reached) 9237 9238.LOP_EXECUTE_INLINE_RANGE_table: 9239 .word gDvmInlineOpsTable 9240 9241/* continuation for OP_IPUT_OBJECT_VOLATILE */ 9242 9243 /* 9244 * Currently: 9245 * r0 holds resolved field 9246 * r9 holds object 9247 */ 9248.LOP_IPUT_OBJECT_VOLATILE_finish: 9249 @bl common_squeak0 9250 mov r1, rINST, lsr #8 @ r1<- A+ 9251 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9252 and r1, r1, #15 @ r1<- A 9253 cmp r9, #0 @ check object for null 9254 GET_VREG(r0, r1) @ r0<- fp[A] 9255 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 9256 beq common_errNullObject @ object was null 9257 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9258 GET_INST_OPCODE(ip) @ extract opcode from rINST 9259 SMP_DMB @ releasing store 9260 str r0, [r9, r3] @ obj.field (32 bits)<- r0 9261 cmp r0, #0 @ stored a null reference? 9262 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not 9263 GOTO_OPCODE(ip) @ jump to next instruction 9264 9265/* continuation for OP_SGET_OBJECT_VOLATILE */ 9266 9267 /* 9268 * Continuation if the field has not yet been resolved. 9269 * r1: BBBB field ref 9270 */ 9271.LOP_SGET_OBJECT_VOLATILE_resolve: 9272 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9273 EXPORT_PC() @ resolve() could throw, so export now 9274 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9275 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9276 cmp r0, #0 @ success? 9277 bne .LOP_SGET_OBJECT_VOLATILE_finish @ yes, finish 9278 b common_exceptionThrown @ no, handle exception 9279 9280/* continuation for OP_SPUT_OBJECT_VOLATILE */ 9281.LOP_SPUT_OBJECT_VOLATILE_finish: @ field ptr in r0 9282 mov r2, rINST, lsr #8 @ r2<- AA 9283 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9284 GET_VREG(r1, r2) @ r1<- fp[AA] 9285 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 9286 ldr r9, [r0, #offField_clazz] @ r9<- field->clazz 9287 GET_INST_OPCODE(ip) @ extract opcode from rINST 9288 SMP_DMB @ releasing store 9289 str r1, [r0, #offStaticField_value] @ field<- vAA 9290 cmp r1, #0 @ stored a null object? 9291 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card based on obj head 9292 GOTO_OPCODE(ip) @ jump to next instruction 9293 9294 .size dvmAsmSisterStart, .-dvmAsmSisterStart 9295 .global dvmAsmSisterEnd 9296dvmAsmSisterEnd: 9297 9298/* File: armv5te/footer.S */ 9299 9300/* 9301 * =========================================================================== 9302 * Common subroutines and data 9303 * =========================================================================== 9304 */ 9305 9306 9307 9308 .text 9309 .align 2 9310 9311#if defined(WITH_JIT) 9312#if defined(WITH_SELF_VERIFICATION) 9313 .global dvmJitToInterpPunt 9314dvmJitToInterpPunt: 9315 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9316 mov r2,#kSVSPunt @ r2<- interpreter entry point 9317 mov r3, #0 9318 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9319 b jitSVShadowRunEnd @ doesn't return 9320 9321 .global dvmJitToInterpSingleStep 9322dvmJitToInterpSingleStep: 9323 str lr,[rGLUE,#offGlue_jitResumeNPC] 9324 str r1,[rGLUE,#offGlue_jitResumeDPC] 9325 mov r2,#kSVSSingleStep @ r2<- interpreter entry point 9326 b jitSVShadowRunEnd @ doesn't return 9327 9328 .global dvmJitToInterpNoChainNoProfile 9329dvmJitToInterpNoChainNoProfile: 9330 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9331 mov r0,rPC @ pass our target PC 9332 mov r2,#kSVSNoProfile @ r2<- interpreter entry point 9333 mov r3, #0 @ 0 means !inJitCodeCache 9334 str r3, [r10, #offThread_inJitCodeCache] @ back to the interp land 9335 b jitSVShadowRunEnd @ doesn't return 9336 9337 .global dvmJitToInterpTraceSelectNoChain 9338dvmJitToInterpTraceSelectNoChain: 9339 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9340 mov r0,rPC @ pass our target PC 9341 mov r2,#kSVSTraceSelect @ r2<- interpreter entry point 9342 mov r3, #0 @ 0 means !inJitCodeCache 9343 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9344 b jitSVShadowRunEnd @ doesn't return 9345 9346 .global dvmJitToInterpTraceSelect 9347dvmJitToInterpTraceSelect: 9348 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9349 ldr r0,[lr, #-1] @ pass our target PC 9350 mov r2,#kSVSTraceSelect @ r2<- interpreter entry point 9351 mov r3, #0 @ 0 means !inJitCodeCache 9352 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9353 b jitSVShadowRunEnd @ doesn't return 9354 9355 .global dvmJitToInterpBackwardBranch 9356dvmJitToInterpBackwardBranch: 9357 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9358 ldr r0,[lr, #-1] @ pass our target PC 9359 mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point 9360 mov r3, #0 @ 0 means !inJitCodeCache 9361 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9362 b jitSVShadowRunEnd @ doesn't return 9363 9364 .global dvmJitToInterpNormal 9365dvmJitToInterpNormal: 9366 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9367 ldr r0,[lr, #-1] @ pass our target PC 9368 mov r2,#kSVSNormal @ r2<- interpreter entry point 9369 mov r3, #0 @ 0 means !inJitCodeCache 9370 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9371 b jitSVShadowRunEnd @ doesn't return 9372 9373 .global dvmJitToInterpNoChain 9374dvmJitToInterpNoChain: 9375 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9376 mov r0,rPC @ pass our target PC 9377 mov r2,#kSVSNoChain @ r2<- interpreter entry point 9378 mov r3, #0 @ 0 means !inJitCodeCache 9379 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9380 b jitSVShadowRunEnd @ doesn't return 9381#else 9382/* 9383 * Return from the translation cache to the interpreter when the compiler is 9384 * having issues translating/executing a Dalvik instruction. We have to skip 9385 * the code cache lookup otherwise it is possible to indefinitely bouce 9386 * between the interpreter and the code cache if the instruction that fails 9387 * to be compiled happens to be at a trace start. 9388 */ 9389 .global dvmJitToInterpPunt 9390dvmJitToInterpPunt: 9391 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9392 mov rPC, r0 9393#if defined(WITH_JIT_TUNING) 9394 mov r0,lr 9395 bl dvmBumpPunt; 9396#endif 9397 EXPORT_PC() 9398 mov r0, #0 9399 str r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9400 adrl rIBASE, dvmAsmInstructionStart 9401 FETCH_INST() 9402 GET_INST_OPCODE(ip) 9403 GOTO_OPCODE(ip) 9404 9405/* 9406 * Return to the interpreter to handle a single instruction. 9407 * On entry: 9408 * r0 <= PC 9409 * r1 <= PC of resume instruction 9410 * lr <= resume point in translation 9411 */ 9412 .global dvmJitToInterpSingleStep 9413dvmJitToInterpSingleStep: 9414 str lr,[rGLUE,#offGlue_jitResumeNPC] 9415 str r1,[rGLUE,#offGlue_jitResumeDPC] 9416 mov r1,#kInterpEntryInstr 9417 @ enum is 4 byte in aapcs-EABI 9418 str r1, [rGLUE, #offGlue_entryPoint] 9419 mov rPC,r0 9420 EXPORT_PC() 9421 9422 adrl rIBASE, dvmAsmInstructionStart 9423 mov r2,#kJitSingleStep @ Ask for single step and then revert 9424 str r2,[rGLUE,#offGlue_jitState] 9425 mov r1,#1 @ set changeInterp to bail to debug interp 9426 b common_gotoBail 9427 9428/* 9429 * Return from the translation cache and immediately request 9430 * a translation for the exit target. Commonly used for callees. 9431 */ 9432 .global dvmJitToInterpTraceSelectNoChain 9433dvmJitToInterpTraceSelectNoChain: 9434#if defined(WITH_JIT_TUNING) 9435 bl dvmBumpNoChain 9436#endif 9437 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9438 mov r0,rPC 9439 bl dvmJitGetCodeAddr @ Is there a translation? 9440 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9441 mov r1, rPC @ arg1 of translation may need this 9442 mov lr, #0 @ in case target is HANDLER_INTERPRET 9443 cmp r0,#0 @ !0 means translation exists 9444 bxne r0 @ continue native execution if so 9445 b 2f @ branch over to use the interpreter 9446 9447/* 9448 * Return from the translation cache and immediately request 9449 * a translation for the exit target. Commonly used following 9450 * invokes. 9451 */ 9452 .global dvmJitToInterpTraceSelect 9453dvmJitToInterpTraceSelect: 9454 ldr rPC,[lr, #-1] @ get our target PC 9455 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9456 add rINST,lr,#-5 @ save start of chain branch 9457 add rINST, #-4 @ .. which is 9 bytes back 9458 mov r0,rPC 9459 bl dvmJitGetCodeAddr @ Is there a translation? 9460 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9461 cmp r0,#0 9462 beq 2f 9463 mov r1,rINST 9464 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9465 mov r1, rPC @ arg1 of translation may need this 9466 mov lr, #0 @ in case target is HANDLER_INTERPRET 9467 cmp r0,#0 @ successful chain? 9468 bxne r0 @ continue native execution 9469 b toInterpreter @ didn't chain - resume with interpreter 9470 9471/* No translation, so request one if profiling isn't disabled*/ 94722: 9473 adrl rIBASE, dvmAsmInstructionStart 9474 GET_JIT_PROF_TABLE(r0) 9475 FETCH_INST() 9476 cmp r0, #0 9477 movne r2,#kJitTSelectRequestHot @ ask for trace selection 9478 bne common_selectTrace 9479 GET_INST_OPCODE(ip) 9480 GOTO_OPCODE(ip) 9481 9482/* 9483 * Return from the translation cache to the interpreter. 9484 * The return was done with a BLX from thumb mode, and 9485 * the following 32-bit word contains the target rPC value. 9486 * Note that lr (r14) will have its low-order bit set to denote 9487 * its thumb-mode origin. 9488 * 9489 * We'll need to stash our lr origin away, recover the new 9490 * target and then check to see if there is a translation available 9491 * for our new target. If so, we do a translation chain and 9492 * go back to native execution. Otherwise, it's back to the 9493 * interpreter (after treating this entry as a potential 9494 * trace start). 9495 */ 9496 .global dvmJitToInterpNormal 9497dvmJitToInterpNormal: 9498 ldr rPC,[lr, #-1] @ get our target PC 9499 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9500 add rINST,lr,#-5 @ save start of chain branch 9501 add rINST,#-4 @ .. which is 9 bytes back 9502#if defined(WITH_JIT_TUNING) 9503 bl dvmBumpNormal 9504#endif 9505 mov r0,rPC 9506 bl dvmJitGetCodeAddr @ Is there a translation? 9507 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9508 cmp r0,#0 9509 beq toInterpreter @ go if not, otherwise do chain 9510 mov r1,rINST 9511 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9512 mov r1, rPC @ arg1 of translation may need this 9513 mov lr, #0 @ in case target is HANDLER_INTERPRET 9514 cmp r0,#0 @ successful chain? 9515 bxne r0 @ continue native execution 9516 b toInterpreter @ didn't chain - resume with interpreter 9517 9518/* 9519 * Return from the translation cache to the interpreter to do method invocation. 9520 * Check if translation exists for the callee, but don't chain to it. 9521 */ 9522 .global dvmJitToInterpNoChainNoProfile 9523dvmJitToInterpNoChainNoProfile: 9524#if defined(WITH_JIT_TUNING) 9525 bl dvmBumpNoChain 9526#endif 9527 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9528 mov r0,rPC 9529 bl dvmJitGetCodeAddr @ Is there a translation? 9530 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9531 mov r1, rPC @ arg1 of translation may need this 9532 mov lr, #0 @ in case target is HANDLER_INTERPRET 9533 cmp r0,#0 9534 bxne r0 @ continue native execution if so 9535 EXPORT_PC() 9536 adrl rIBASE, dvmAsmInstructionStart 9537 FETCH_INST() 9538 GET_INST_OPCODE(ip) @ extract opcode from rINST 9539 GOTO_OPCODE(ip) @ jump to next instruction 9540 9541/* 9542 * Return from the translation cache to the interpreter to do method invocation. 9543 * Check if translation exists for the callee, but don't chain to it. 9544 */ 9545 .global dvmJitToInterpNoChain 9546dvmJitToInterpNoChain: 9547#if defined(WITH_JIT_TUNING) 9548 bl dvmBumpNoChain 9549#endif 9550 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9551 mov r0,rPC 9552 bl dvmJitGetCodeAddr @ Is there a translation? 9553 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9554 mov r1, rPC @ arg1 of translation may need this 9555 mov lr, #0 @ in case target is HANDLER_INTERPRET 9556 cmp r0,#0 9557 bxne r0 @ continue native execution if so 9558#endif 9559 9560/* 9561 * No translation, restore interpreter regs and start interpreting. 9562 * rGLUE & rFP were preserved in the translated code, and rPC has 9563 * already been restored by the time we get here. We'll need to set 9564 * up rIBASE & rINST, and load the address of the JitTable into r0. 9565 */ 9566toInterpreter: 9567 EXPORT_PC() 9568 adrl rIBASE, dvmAsmInstructionStart 9569 FETCH_INST() 9570 GET_JIT_PROF_TABLE(r0) 9571 @ NOTE: intended fallthrough 9572 9573/* 9574 * Common code to update potential trace start counter, and initiate 9575 * a trace-build if appropriate. On entry, rPC should point to the 9576 * next instruction to execute, and rINST should be already loaded with 9577 * the next opcode word, and r0 holds a pointer to the jit profile 9578 * table (pJitProfTable). 9579 */ 9580common_testUpdateProfile: 9581 cmp r0,#0 9582 GET_INST_OPCODE(ip) 9583 GOTO_OPCODE_IFEQ(ip) @ if not profiling, fallthrough otherwise */ 9584 9585common_updateProfile: 9586 eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function 9587 lsl r3,r3,#(32 - JIT_PROF_SIZE_LOG_2) @ shift out excess bits 9588 ldrb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter 9589 GET_INST_OPCODE(ip) 9590 subs r1,r1,#1 @ decrement counter 9591 strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it 9592 GOTO_OPCODE_IFNE(ip) @ if not threshold, fallthrough otherwise */ 9593 9594/* 9595 * Here, we switch to the debug interpreter to request 9596 * trace selection. First, though, check to see if there 9597 * is already a native translation in place (and, if so, 9598 * jump to it now). 9599 */ 9600 GET_JIT_THRESHOLD(r1) 9601 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9602 strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter 9603 EXPORT_PC() 9604 mov r0,rPC 9605 bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC) 9606 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9607 mov r1, rPC @ arg1 of translation may need this 9608 mov lr, #0 @ in case target is HANDLER_INTERPRET 9609 cmp r0,#0 9610#if !defined(WITH_SELF_VERIFICATION) 9611 bxne r0 @ jump to the translation 9612 mov r2,#kJitTSelectRequest @ ask for trace selection 9613 @ fall-through to common_selectTrace 9614#else 9615 moveq r2,#kJitTSelectRequest @ ask for trace selection 9616 beq common_selectTrace 9617 /* 9618 * At this point, we have a target translation. However, if 9619 * that translation is actually the interpret-only pseudo-translation 9620 * we want to treat it the same as no translation. 9621 */ 9622 mov r10, r0 @ save target 9623 bl dvmCompilerGetInterpretTemplate 9624 cmp r0, r10 @ special case? 9625 bne jitSVShadowRunStart @ set up self verification shadow space 9626 @ Need to clear the inJitCodeCache flag 9627 ldr r10, [rGLUE, #offGlue_self] @ r10 <- glue->self 9628 mov r3, #0 @ 0 means not in the JIT code cache 9629 str r3, [r10, #offThread_inJitCodeCache] @ back to the interp land 9630 GET_INST_OPCODE(ip) 9631 GOTO_OPCODE(ip) 9632 /* no return */ 9633#endif 9634 9635/* 9636 * On entry: 9637 * r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot 9638 */ 9639common_selectTrace: 9640 str r2,[rGLUE,#offGlue_jitState] 9641 mov r2,#kInterpEntryInstr @ normal entry reason 9642 str r2,[rGLUE,#offGlue_entryPoint] 9643 mov r1,#1 @ set changeInterp 9644 b common_gotoBail 9645 9646#if defined(WITH_SELF_VERIFICATION) 9647/* 9648 * Save PC and registers to shadow memory for self verification mode 9649 * before jumping to native translation. 9650 * On entry: 9651 * rPC, rFP, rGLUE: the values that they should contain 9652 * r10: the address of the target translation. 9653 */ 9654jitSVShadowRunStart: 9655 mov r0,rPC @ r0<- program counter 9656 mov r1,rFP @ r1<- frame pointer 9657 mov r2,rGLUE @ r2<- InterpState pointer 9658 mov r3,r10 @ r3<- target translation 9659 bl dvmSelfVerificationSaveState @ save registers to shadow space 9660 ldr rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space 9661 add rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space 9662 bx r10 @ jump to the translation 9663 9664/* 9665 * Restore PC, registers, and interpState to original values 9666 * before jumping back to the interpreter. 9667 */ 9668jitSVShadowRunEnd: 9669 mov r1,rFP @ pass ending fp 9670 bl dvmSelfVerificationRestoreState @ restore pc and fp values 9671 ldr rPC,[r0,#offShadowSpace_startPC] @ restore PC 9672 ldr rFP,[r0,#offShadowSpace_fp] @ restore FP 9673 ldr rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState 9674 ldr r1,[r0,#offShadowSpace_svState] @ get self verification state 9675 cmp r1,#0 @ check for punt condition 9676 beq 1f 9677 mov r2,#kJitSelfVerification @ ask for self verification 9678 str r2,[rGLUE,#offGlue_jitState] 9679 mov r2,#kInterpEntryInstr @ normal entry reason 9680 str r2,[rGLUE,#offGlue_entryPoint] 9681 mov r1,#1 @ set changeInterp 9682 b common_gotoBail 9683 96841: @ exit to interpreter without check 9685 EXPORT_PC() 9686 adrl rIBASE, dvmAsmInstructionStart 9687 FETCH_INST() 9688 GET_INST_OPCODE(ip) 9689 GOTO_OPCODE(ip) 9690#endif 9691 9692#endif 9693 9694/* 9695 * Common code when a backward branch is taken. 9696 * 9697 * TODO: we could avoid a branch by just setting r0 and falling through 9698 * into the common_periodicChecks code, and having a test on r0 at the 9699 * end determine if we should return to the caller or update & branch to 9700 * the next instr. 9701 * 9702 * On entry: 9703 * r9 is PC adjustment *in bytes* 9704 */ 9705common_backwardBranch: 9706 mov r0, #kInterpEntryInstr 9707 bl common_periodicChecks 9708#if defined(WITH_JIT) 9709 GET_JIT_PROF_TABLE(r0) 9710 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9711 cmp r0,#0 9712 bne common_updateProfile 9713 GET_INST_OPCODE(ip) 9714 GOTO_OPCODE(ip) 9715#else 9716 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9717 GET_INST_OPCODE(ip) @ extract opcode from rINST 9718 GOTO_OPCODE(ip) @ jump to next instruction 9719#endif 9720 9721 9722/* 9723 * Need to see if the thread needs to be suspended or debugger/profiler 9724 * activity has begun. If so, we suspend the thread or side-exit to 9725 * the debug interpreter as appropriate. 9726 * 9727 * The common case is no activity on any of these, so we want to figure 9728 * that out quickly. If something is up, we can then sort out what. 9729 * 9730 * We want to be fast if the VM was built without debugger or profiler 9731 * support, but we also need to recognize that the system is usually 9732 * shipped with both of these enabled. 9733 * 9734 * TODO: reduce this so we're just checking a single location. 9735 * 9736 * On entry: 9737 * r0 is reentry type, e.g. kInterpEntryInstr (for debugger/profiling) 9738 * r9 is trampoline PC adjustment *in bytes* 9739 */ 9740common_periodicChecks: 9741 ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount 9742 9743 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 9744 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 9745 9746 ldr ip, [r3] @ ip<- suspendCount (int) 9747 9748 cmp r1, #0 @ debugger enabled? 9749#if defined(WORKAROUND_CORTEX_A9_745320) 9750 /* Don't use conditional loads if the HW defect exists */ 9751 beq 101f 9752 ldrb r1, [r1] @ yes, r1<- debuggerActive (boolean) 9753101: 9754#else 9755 ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean) 9756#endif 9757 ldr r2, [r2] @ r2<- activeProfilers (int) 9758 orrnes ip, ip, r1 @ ip<- suspendCount | debuggerActive 9759 /* 9760 * Don't switch the interpreter in the libdvm_traceview build even if the 9761 * profiler is active. 9762 * The code here is opted for less intrusion instead of performance. 9763 * That is, *pActiveProfilers is still loaded into r2 even though it is not 9764 * used when WITH_INLINE_PROFILING is defined. 9765 */ 9766#if !defined(WITH_INLINE_PROFILING) 9767 orrs ip, ip, r2 @ ip<- suspend|debugger|profiler; set Z 9768#endif 9769 9770 9771 bxeq lr @ all zero, return 9772 9773 /* 9774 * One or more interesting events have happened. Figure out what. 9775 * 9776 * If debugging or profiling are compiled in, we need to disambiguate. 9777 * 9778 * r0 still holds the reentry type. 9779 */ 9780 ldr ip, [r3] @ ip<- suspendCount (int) 9781 cmp ip, #0 @ want suspend? 9782 beq 1f @ no, must be debugger/profiler 9783 9784 stmfd sp!, {r0, lr} @ preserve r0 and lr 9785#if defined(WITH_JIT) 9786 /* 9787 * Refresh the Jit's cached copy of profile table pointer. This pointer 9788 * doubles as the Jit's on/off switch. 9789 */ 9790 ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable 9791 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 9792 ldr r3, [r3] @ r3 <- pJitProfTable 9793 EXPORT_PC() @ need for precise GC 9794 str r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch 9795#else 9796 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 9797 EXPORT_PC() @ need for precise GC 9798#endif 9799 bl dvmCheckSuspendPending @ do full check, suspend if necessary 9800 ldmfd sp!, {r0, lr} @ restore r0 and lr 9801 9802 /* 9803 * Reload the debugger/profiler enable flags. We're checking to see 9804 * if either of these got set while we were suspended. 9805 * 9806 * If WITH_INLINE_PROFILING is configured, don't check whether the profiler 9807 * is enabled or not as the profiling will be done inline. 9808 */ 9809 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 9810 cmp r1, #0 @ debugger enabled? 9811#if defined(WORKAROUND_CORTEX_A9_745320) 9812 /* Don't use conditional loads if the HW defect exists */ 9813 beq 101f 9814 ldrb r1, [r1] @ yes, r1<- debuggerActive (boolean) 9815101: 9816#else 9817 ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean) 9818#endif 9819 9820#if !defined(WITH_INLINE_PROFILING) 9821 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 9822 ldr r2, [r2] @ r2<- activeProfilers (int) 9823 orrs r1, r1, r2 9824#else 9825 cmp r1, #0 @ only consult the debuggerActive flag 9826#endif 9827 9828 beq 2f 9829 98301: @ debugger/profiler enabled, bail out; glue->entryPoint was set above 9831 str r0, [rGLUE, #offGlue_entryPoint] @ store r0, need for debug/prof 9832 add rPC, rPC, r9 @ update rPC 9833 mov r1, #1 @ "want switch" = true 9834 b common_gotoBail @ side exit 9835 98362: 9837 bx lr @ nothing to do, return 9838 9839 9840/* 9841 * The equivalent of "goto bail", this calls through the "bail handler". 9842 * 9843 * State registers will be saved to the "glue" area before bailing. 9844 * 9845 * On entry: 9846 * r1 is "bool changeInterp", indicating if we want to switch to the 9847 * other interpreter or just bail all the way out 9848 */ 9849common_gotoBail: 9850 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 9851 mov r0, rGLUE @ r0<- glue ptr 9852 b dvmMterpStdBail @ call(glue, changeInterp) 9853 9854 @add r1, r1, #1 @ using (boolean+1) 9855 @add r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf 9856 @bl _longjmp @ does not return 9857 @bl common_abort 9858 9859 9860/* 9861 * Common code for method invocation with range. 9862 * 9863 * On entry: 9864 * r0 is "Method* methodToCall", the method we're trying to call 9865 */ 9866common_invokeMethodRange: 9867.LinvokeNewRange: 9868 @ prepare to copy args to "outs" area of current frame 9869 movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero 9870 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9871 beq .LinvokeArgsDone @ if no args, skip the rest 9872 FETCH(r1, 2) @ r1<- CCCC 9873 9874 @ r0=methodToCall, r1=CCCC, r2=count, r10=outs 9875 @ (very few methods have > 10 args; could unroll for common cases) 9876 add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC] 9877 sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args 9878 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 98791: ldr r1, [r3], #4 @ val = *fp++ 9880 subs r2, r2, #1 @ count-- 9881 str r1, [r10], #4 @ *outs++ = val 9882 bne 1b @ ...while count != 0 9883 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9884 b .LinvokeArgsDone 9885 9886/* 9887 * Common code for method invocation without range. 9888 * 9889 * On entry: 9890 * r0 is "Method* methodToCall", the method we're trying to call 9891 */ 9892common_invokeMethodNoRange: 9893.LinvokeNewNoRange: 9894 @ prepare to copy args to "outs" area of current frame 9895 movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero 9896 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9897 FETCH(r1, 2) @ r1<- GFED (load here to hide latency) 9898 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 9899 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9900 beq .LinvokeArgsDone 9901 9902 @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs 9903.LinvokeNonRange: 9904 rsb r2, r2, #5 @ r2<- 5-r2 9905 add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each 9906 bl common_abort @ (skipped due to ARM prefetch) 99075: and ip, rINST, #0x0f00 @ isolate A 9908 ldr r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2) 9909 mov r0, r0 @ nop 9910 str r2, [r10, #-4]! @ *--outs = vA 99114: and ip, r1, #0xf000 @ isolate G 9912 ldr r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2) 9913 mov r0, r0 @ nop 9914 str r2, [r10, #-4]! @ *--outs = vG 99153: and ip, r1, #0x0f00 @ isolate F 9916 ldr r2, [rFP, ip, lsr #6] @ r2<- vF 9917 mov r0, r0 @ nop 9918 str r2, [r10, #-4]! @ *--outs = vF 99192: and ip, r1, #0x00f0 @ isolate E 9920 ldr r2, [rFP, ip, lsr #2] @ r2<- vE 9921 mov r0, r0 @ nop 9922 str r2, [r10, #-4]! @ *--outs = vE 99231: and ip, r1, #0x000f @ isolate D 9924 ldr r2, [rFP, ip, lsl #2] @ r2<- vD 9925 mov r0, r0 @ nop 9926 str r2, [r10, #-4]! @ *--outs = vD 99270: @ fall through to .LinvokeArgsDone 9928 9929.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize 9930 ldr r2, [r0, #offMethod_insns] @ r2<- method->insns 9931 ldr rINST, [r0, #offMethod_clazz] @ rINST<- method->clazz 9932 @ find space for the new stack frame, check for overflow 9933 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area 9934 sub r1, r1, r9, lsl #2 @ r1<- newFp (old savearea - regsSize) 9935 SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea 9936@ bl common_dumpRegs 9937 ldr r9, [rGLUE, #offGlue_interpStackEnd] @ r9<- interpStackEnd 9938 sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize) 9939 cmp r3, r9 @ bottom < interpStackEnd? 9940 ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags 9941 blo .LstackOverflow @ yes, this frame will overflow stack 9942 9943 @ set up newSaveArea 9944#ifdef EASY_GDB 9945 SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area 9946 str ip, [r10, #offStackSaveArea_prevSave] 9947#endif 9948 str rFP, [r10, #offStackSaveArea_prevFrame] 9949 str rPC, [r10, #offStackSaveArea_savedPc] 9950#if defined(WITH_JIT) 9951 mov r9, #0 9952 str r9, [r10, #offStackSaveArea_returnAddr] 9953#endif 9954#if defined(WITH_INLINE_PROFILING) 9955 stmfd sp!, {r0-r3} @ preserve r0-r3 9956 mov r1, r6 9957 @ r0=methodToCall, r1=rGlue 9958 bl dvmFastMethodTraceEnter 9959 ldmfd sp!, {r0-r3} @ restore r0-r3 9960#endif 9961 str r0, [r10, #offStackSaveArea_method] 9962 tst r3, #ACC_NATIVE 9963 bne .LinvokeNative 9964 9965 /* 9966 stmfd sp!, {r0-r3} 9967 bl common_printNewline 9968 mov r0, rFP 9969 mov r1, #0 9970 bl dvmDumpFp 9971 ldmfd sp!, {r0-r3} 9972 stmfd sp!, {r0-r3} 9973 mov r0, r1 9974 mov r1, r10 9975 bl dvmDumpFp 9976 bl common_printNewline 9977 ldmfd sp!, {r0-r3} 9978 */ 9979 9980 ldrh r9, [r2] @ r9 <- load INST from new PC 9981 ldr r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex 9982 mov rPC, r2 @ publish new rPC 9983 ldr r2, [rGLUE, #offGlue_self] @ r2<- glue->self 9984 9985 @ Update "glue" values for the new method 9986 @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST 9987 str r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall 9988 str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ... 9989#if defined(WITH_JIT) 9990 GET_JIT_PROF_TABLE(r0) 9991 mov rFP, r1 @ fp = newFp 9992 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 9993 mov rINST, r9 @ publish new rINST 9994 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 9995 cmp r0,#0 9996 bne common_updateProfile 9997 GOTO_OPCODE(ip) @ jump to next instruction 9998#else 9999 mov rFP, r1 @ fp = newFp 10000 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 10001 mov rINST, r9 @ publish new rINST 10002 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 10003 GOTO_OPCODE(ip) @ jump to next instruction 10004#endif 10005 10006.LinvokeNative: 10007 @ Prep for the native call 10008 @ r0=methodToCall, r1=newFp, r10=newSaveArea 10009 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10010 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->... 10011 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp 10012 str r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top 10013 mov r9, r3 @ r9<- glue->self (preserve) 10014 10015 mov r2, r0 @ r2<- methodToCall 10016 mov r0, r1 @ r0<- newFp (points to args) 10017 add r1, rGLUE, #offGlue_retval @ r1<- &retval 10018 10019#ifdef ASSIST_DEBUGGER 10020 /* insert fake function header to help gdb find the stack frame */ 10021 b .Lskip 10022 .type dalvik_mterp, %function 10023dalvik_mterp: 10024 .fnstart 10025 MTERP_ENTRY1 10026 MTERP_ENTRY2 10027.Lskip: 10028#endif 10029 10030#if defined(WITH_INLINE_PROFILING) 10031 @ r2=JNIMethod, r6=rGLUE 10032 stmfd sp!, {r2,r6} 10033#endif 10034 10035 mov lr, pc @ set return addr 10036 ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc 10037 10038#if defined(WITH_INLINE_PROFILING) 10039 @ r0=JNIMethod, r1=rGLUE 10040 ldmfd sp!, {r0-r1} 10041 bl dvmFastNativeMethodTraceExit 10042#endif 10043 10044#if defined(WITH_JIT) 10045 ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status 10046#endif 10047 10048 @ native return; r9=self, r10=newSaveArea 10049 @ equivalent to dvmPopJniLocals 10050 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top 10051 ldr r1, [r9, #offThread_exception] @ check for exception 10052#if defined(WITH_JIT) 10053 ldr r3, [r3] @ r3 <- gDvmJit.pProfTable 10054#endif 10055 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp 10056 cmp r1, #0 @ null? 10057 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top 10058#if defined(WITH_JIT) 10059 str r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch 10060#endif 10061 bne common_exceptionThrown @ no, handle exception 10062 10063 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 10064 GET_INST_OPCODE(ip) @ extract opcode from rINST 10065 GOTO_OPCODE(ip) @ jump to next instruction 10066 10067.LstackOverflow: @ r0=methodToCall 10068 mov r1, r0 @ r1<- methodToCall 10069 ldr r0, [rGLUE, #offGlue_self] @ r0<- self 10070 bl dvmHandleStackOverflow 10071 b common_exceptionThrown 10072#ifdef ASSIST_DEBUGGER 10073 .fnend 10074#endif 10075 10076 10077 /* 10078 * Common code for method invocation, calling through "glue code". 10079 * 10080 * TODO: now that we have range and non-range invoke handlers, this 10081 * needs to be split into two. Maybe just create entry points 10082 * that set r9 and jump here? 10083 * 10084 * On entry: 10085 * r0 is "Method* methodToCall", the method we're trying to call 10086 * r9 is "bool methodCallRange", indicating if this is a /range variant 10087 */ 10088 .if 0 10089.LinvokeOld: 10090 sub sp, sp, #8 @ space for args + pad 10091 FETCH(ip, 2) @ ip<- FEDC or CCCC 10092 mov r2, r0 @ A2<- methodToCall 10093 mov r0, rGLUE @ A0<- glue 10094 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 10095 mov r1, r9 @ A1<- methodCallRange 10096 mov r3, rINST, lsr #8 @ A3<- AA 10097 str ip, [sp, #0] @ A4<- ip 10098 bl dvmMterp_invokeMethod @ call the C invokeMethod 10099 add sp, sp, #8 @ remove arg area 10100 b common_resumeAfterGlueCall @ continue to next instruction 10101 .endif 10102 10103 10104 10105/* 10106 * Common code for handling a return instruction. 10107 * 10108 * This does not return. 10109 */ 10110common_returnFromMethod: 10111.LreturnNew: 10112 mov r0, #kInterpEntryReturn 10113 mov r9, #0 10114 bl common_periodicChecks 10115 10116#if defined(WITH_INLINE_PROFILING) 10117 stmfd sp!, {r0-r3} @ preserve r0-r3 10118 mov r0, r6 10119 @ r0=rGlue 10120 bl dvmFastJavaMethodTraceExit 10121 ldmfd sp!, {r0-r3} @ restore r0-r3 10122#endif 10123 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) 10124 ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame 10125 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc 10126 ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)] 10127 @ r2<- method we're returning to 10128 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10129 cmp r2, #0 @ is this a break frame? 10130#if defined(WORKAROUND_CORTEX_A9_745320) 10131 /* Don't use conditional loads if the HW defect exists */ 10132 beq 101f 10133 ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz 10134101: 10135#else 10136 ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz 10137#endif 10138 mov r1, #0 @ "want switch" = false 10139 beq common_gotoBail @ break frame, bail out completely 10140 10141 PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST 10142 str r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method 10143 ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex 10144 str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp 10145#if defined(WITH_JIT) 10146 ldr r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr 10147 mov rPC, r9 @ publish new rPC 10148 str r1, [rGLUE, #offGlue_methodClassDex] 10149 str r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land 10150 cmp r10, #0 @ caller is compiled code 10151 blxne r10 10152 GET_INST_OPCODE(ip) @ extract opcode from rINST 10153 GOTO_OPCODE(ip) @ jump to next instruction 10154#else 10155 GET_INST_OPCODE(ip) @ extract opcode from rINST 10156 mov rPC, r9 @ publish new rPC 10157 str r1, [rGLUE, #offGlue_methodClassDex] 10158 GOTO_OPCODE(ip) @ jump to next instruction 10159#endif 10160 10161 /* 10162 * Return handling, calls through "glue code". 10163 */ 10164 .if 0 10165.LreturnOld: 10166 SAVE_PC_FP_TO_GLUE() @ export state 10167 mov r0, rGLUE @ arg to function 10168 bl dvmMterp_returnFromMethod 10169 b common_resumeAfterGlueCall 10170 .endif 10171 10172 10173/* 10174 * Somebody has thrown an exception. Handle it. 10175 * 10176 * If the exception processing code returns to us (instead of falling 10177 * out of the interpreter), continue with whatever the next instruction 10178 * now happens to be. 10179 * 10180 * This does not return. 10181 */ 10182 .global dvmMterpCommonExceptionThrown 10183dvmMterpCommonExceptionThrown: 10184common_exceptionThrown: 10185.LexceptionNew: 10186 mov r0, #kInterpEntryThrow 10187 mov r9, #0 10188 bl common_periodicChecks 10189 10190 ldr r10, [rGLUE, #offGlue_self] @ r10<- glue->self 10191 ldr r9, [r10, #offThread_exception] @ r9<- self->exception 10192 mov r1, r10 @ r1<- self 10193 mov r0, r9 @ r0<- exception 10194 bl dvmAddTrackedAlloc @ don't let the exception be GCed 10195 mov r3, #0 @ r3<- NULL 10196 str r3, [r10, #offThread_exception] @ self->exception = NULL 10197 10198 /* set up args and a local for "&fp" */ 10199 /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */ 10200 str rFP, [sp, #-4]! @ *--sp = fp 10201 mov ip, sp @ ip<- &fp 10202 mov r3, #0 @ r3<- false 10203 str ip, [sp, #-4]! @ *--sp = &fp 10204 ldr r1, [rGLUE, #offGlue_method] @ r1<- glue->method 10205 mov r0, r10 @ r0<- self 10206 ldr r1, [r1, #offMethod_insns] @ r1<- method->insns 10207 mov r2, r9 @ r2<- exception 10208 sub r1, rPC, r1 @ r1<- pc - method->insns 10209 mov r1, r1, asr #1 @ r1<- offset in code units 10210 10211 /* call, r0 gets catchRelPc (a code-unit offset) */ 10212 bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp) 10213 10214 /* fix earlier stack overflow if necessary; may trash rFP */ 10215 ldrb r1, [r10, #offThread_stackOverflowed] 10216 cmp r1, #0 @ did we overflow earlier? 10217 beq 1f @ no, skip ahead 10218 mov rFP, r0 @ save relPc result in rFP 10219 mov r0, r10 @ r0<- self 10220 mov r1, r9 @ r1<- exception 10221 bl dvmCleanupStackOverflow @ call(self) 10222 mov r0, rFP @ restore result 102231: 10224 10225 /* update frame pointer and check result from dvmFindCatchBlock */ 10226 ldr rFP, [sp, #4] @ retrieve the updated rFP 10227 cmp r0, #0 @ is catchRelPc < 0? 10228 add sp, sp, #8 @ restore stack 10229 bmi .LnotCaughtLocally 10230 10231 /* adjust locals to match self->curFrame and updated PC */ 10232 SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area 10233 ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method 10234 str r1, [rGLUE, #offGlue_method] @ glue->method = new method 10235 ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz 10236 ldr r3, [r1, #offMethod_insns] @ r3<- method->insns 10237 ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex 10238 add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc 10239 str r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth... 10240 10241 /* release the tracked alloc on the exception */ 10242 mov r0, r9 @ r0<- exception 10243 mov r1, r10 @ r1<- self 10244 bl dvmReleaseTrackedAlloc @ release the exception 10245 10246 /* restore the exception if the handler wants it */ 10247 FETCH_INST() @ load rINST from rPC 10248 GET_INST_OPCODE(ip) @ extract opcode from rINST 10249 cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"? 10250 streq r9, [r10, #offThread_exception] @ yes, restore the exception 10251 GOTO_OPCODE(ip) @ jump to next instruction 10252 10253.LnotCaughtLocally: @ r9=exception, r10=self 10254 /* fix stack overflow if necessary */ 10255 ldrb r1, [r10, #offThread_stackOverflowed] 10256 cmp r1, #0 @ did we overflow earlier? 10257 movne r0, r10 @ if yes: r0<- self 10258 movne r1, r9 @ if yes: r1<- exception 10259 blne dvmCleanupStackOverflow @ if yes: call(self) 10260 10261 @ may want to show "not caught locally" debug messages here 10262#if DVM_SHOW_EXCEPTION >= 2 10263 /* call __android_log_print(prio, tag, format, ...) */ 10264 /* "Exception %s from %s:%d not caught locally" */ 10265 @ dvmLineNumFromPC(method, pc - method->insns) 10266 ldr r0, [rGLUE, #offGlue_method] 10267 ldr r1, [r0, #offMethod_insns] 10268 sub r1, rPC, r1 10269 asr r1, r1, #1 10270 bl dvmLineNumFromPC 10271 str r0, [sp, #-4]! 10272 @ dvmGetMethodSourceFile(method) 10273 ldr r0, [rGLUE, #offGlue_method] 10274 bl dvmGetMethodSourceFile 10275 str r0, [sp, #-4]! 10276 @ exception->clazz->descriptor 10277 ldr r3, [r9, #offObject_clazz] 10278 ldr r3, [r3, #offClassObject_descriptor] 10279 @ 10280 ldr r2, strExceptionNotCaughtLocally 10281 ldr r1, strLogTag 10282 mov r0, #3 @ LOG_DEBUG 10283 bl __android_log_print 10284#endif 10285 str r9, [r10, #offThread_exception] @ restore exception 10286 mov r0, r9 @ r0<- exception 10287 mov r1, r10 @ r1<- self 10288 bl dvmReleaseTrackedAlloc @ release the exception 10289 mov r1, #0 @ "want switch" = false 10290 b common_gotoBail @ bail out 10291 10292 10293 /* 10294 * Exception handling, calls through "glue code". 10295 */ 10296 .if 0 10297.LexceptionOld: 10298 SAVE_PC_FP_TO_GLUE() @ export state 10299 mov r0, rGLUE @ arg to function 10300 bl dvmMterp_exceptionThrown 10301 b common_resumeAfterGlueCall 10302 .endif 10303 10304 10305/* 10306 * After returning from a "glued" function, pull out the updated 10307 * values and start executing at the next instruction. 10308 */ 10309common_resumeAfterGlueCall: 10310 LOAD_PC_FP_FROM_GLUE() @ pull rPC and rFP out of glue 10311 FETCH_INST() @ load rINST from rPC 10312 GET_INST_OPCODE(ip) @ extract opcode from rINST 10313 GOTO_OPCODE(ip) @ jump to next instruction 10314 10315/* 10316 * Invalid array index. Note that our calling convention is strange; we use r1 10317 * and r3 because those just happen to be the registers all our callers are 10318 * using. We shuffle them here before calling the C function. 10319 * r1: index 10320 * r3: size 10321 */ 10322common_errArrayIndex: 10323 EXPORT_PC() 10324 mov r0, r1 10325 mov r1, r3 10326 bl dvmThrowAIOOBE 10327 b common_exceptionThrown 10328 10329/* 10330 * Integer divide or mod by zero. 10331 */ 10332common_errDivideByZero: 10333 EXPORT_PC() 10334 ldr r0, strArithmeticException 10335 ldr r1, strDivideByZero 10336 bl dvmThrowException 10337 b common_exceptionThrown 10338 10339/* 10340 * Attempt to allocate an array with a negative size. 10341 */ 10342common_errNegativeArraySize: 10343 EXPORT_PC() 10344 ldr r0, strNegativeArraySizeException 10345 mov r1, #0 10346 bl dvmThrowException 10347 b common_exceptionThrown 10348 10349/* 10350 * Invocation of a non-existent method. 10351 */ 10352common_errNoSuchMethod: 10353 EXPORT_PC() 10354 ldr r0, strNoSuchMethodError 10355 mov r1, #0 10356 bl dvmThrowException 10357 b common_exceptionThrown 10358 10359/* 10360 * We encountered a null object when we weren't expecting one. We 10361 * export the PC, throw a NullPointerException, and goto the exception 10362 * processing code. 10363 */ 10364common_errNullObject: 10365 EXPORT_PC() 10366 ldr r0, strNullPointerException 10367 mov r1, #0 10368 bl dvmThrowException 10369 b common_exceptionThrown 10370 10371/* 10372 * For debugging, cause an immediate fault. The source address will 10373 * be in lr (use a bl instruction to jump here). 10374 */ 10375common_abort: 10376 ldr pc, .LdeadFood 10377.LdeadFood: 10378 .word 0xdeadf00d 10379 10380/* 10381 * Spit out a "we were here", preserving all registers. (The attempt 10382 * to save ip won't work, but we need to save an even number of 10383 * registers for EABI 64-bit stack alignment.) 10384 */ 10385 .macro SQUEAK num 10386common_squeak\num: 10387 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10388 ldr r0, strSqueak 10389 mov r1, #\num 10390 bl printf 10391 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10392 bx lr 10393 .endm 10394 10395 SQUEAK 0 10396 SQUEAK 1 10397 SQUEAK 2 10398 SQUEAK 3 10399 SQUEAK 4 10400 SQUEAK 5 10401 10402/* 10403 * Spit out the number in r0, preserving registers. 10404 */ 10405common_printNum: 10406 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10407 mov r1, r0 10408 ldr r0, strSqueak 10409 bl printf 10410 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10411 bx lr 10412 10413/* 10414 * Print a newline, preserving registers. 10415 */ 10416common_printNewline: 10417 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10418 ldr r0, strNewline 10419 bl printf 10420 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10421 bx lr 10422 10423 /* 10424 * Print the 32-bit quantity in r0 as a hex value, preserving registers. 10425 */ 10426common_printHex: 10427 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10428 mov r1, r0 10429 ldr r0, strPrintHex 10430 bl printf 10431 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10432 bx lr 10433 10434/* 10435 * Print the 64-bit quantity in r0-r1, preserving registers. 10436 */ 10437common_printLong: 10438 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10439 mov r3, r1 10440 mov r2, r0 10441 ldr r0, strPrintLong 10442 bl printf 10443 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10444 bx lr 10445 10446/* 10447 * Print full method info. Pass the Method* in r0. Preserves regs. 10448 */ 10449common_printMethod: 10450 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10451 bl dvmMterpPrintMethod 10452 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10453 bx lr 10454 10455/* 10456 * Call a C helper function that dumps regs and possibly some 10457 * additional info. Requires the C function to be compiled in. 10458 */ 10459 .if 0 10460common_dumpRegs: 10461 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10462 bl dvmMterpDumpArmRegs 10463 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10464 bx lr 10465 .endif 10466 10467#if 0 10468/* 10469 * Experiment on VFP mode. 10470 * 10471 * uint32_t setFPSCR(uint32_t val, uint32_t mask) 10472 * 10473 * Updates the bits specified by "mask", setting them to the values in "val". 10474 */ 10475setFPSCR: 10476 and r0, r0, r1 @ make sure no stray bits are set 10477 fmrx r2, fpscr @ get VFP reg 10478 mvn r1, r1 @ bit-invert mask 10479 and r2, r2, r1 @ clear masked bits 10480 orr r2, r2, r0 @ set specified bits 10481 fmxr fpscr, r2 @ set VFP reg 10482 mov r0, r2 @ return new value 10483 bx lr 10484 10485 .align 2 10486 .global dvmConfigureFP 10487 .type dvmConfigureFP, %function 10488dvmConfigureFP: 10489 stmfd sp!, {ip, lr} 10490 /* 0x03000000 sets DN/FZ */ 10491 /* 0x00009f00 clears the six exception enable flags */ 10492 bl common_squeak0 10493 mov r0, #0x03000000 @ r0<- 0x03000000 10494 add r1, r0, #0x9f00 @ r1<- 0x03009f00 10495 bl setFPSCR 10496 ldmfd sp!, {ip, pc} 10497#endif 10498 10499 10500/* 10501 * String references, must be close to the code that uses them. 10502 */ 10503 .align 2 10504strArithmeticException: 10505 .word .LstrArithmeticException 10506strDivideByZero: 10507 .word .LstrDivideByZero 10508strNegativeArraySizeException: 10509 .word .LstrNegativeArraySizeException 10510strNoSuchMethodError: 10511 .word .LstrNoSuchMethodError 10512strNullPointerException: 10513 .word .LstrNullPointerException 10514 10515strLogTag: 10516 .word .LstrLogTag 10517strExceptionNotCaughtLocally: 10518 .word .LstrExceptionNotCaughtLocally 10519 10520strNewline: 10521 .word .LstrNewline 10522strSqueak: 10523 .word .LstrSqueak 10524strPrintHex: 10525 .word .LstrPrintHex 10526strPrintLong: 10527 .word .LstrPrintLong 10528 10529/* 10530 * Zero-terminated ASCII string data. 10531 * 10532 * On ARM we have two choices: do like gcc does, and LDR from a .word 10533 * with the address, or use an ADR pseudo-op to get the address 10534 * directly. ADR saves 4 bytes and an indirection, but it's using a 10535 * PC-relative addressing mode and hence has a limited range, which 10536 * makes it not work well with mergeable string sections. 10537 */ 10538 .section .rodata.str1.4,"aMS",%progbits,1 10539 10540.LstrBadEntryPoint: 10541 .asciz "Bad entry point %d\n" 10542.LstrArithmeticException: 10543 .asciz "Ljava/lang/ArithmeticException;" 10544.LstrDivideByZero: 10545 .asciz "divide by zero" 10546.LstrFilledNewArrayNotImpl: 10547 .asciz "filled-new-array only implemented for objects and 'int'" 10548.LstrInternalError: 10549 .asciz "Ljava/lang/InternalError;" 10550.LstrInstantiationError: 10551 .asciz "Ljava/lang/InstantiationError;" 10552.LstrNegativeArraySizeException: 10553 .asciz "Ljava/lang/NegativeArraySizeException;" 10554.LstrNoSuchMethodError: 10555 .asciz "Ljava/lang/NoSuchMethodError;" 10556.LstrNullPointerException: 10557 .asciz "Ljava/lang/NullPointerException;" 10558 10559.LstrLogTag: 10560 .asciz "mterp" 10561.LstrExceptionNotCaughtLocally: 10562 .asciz "Exception %s from %s:%d not caught locally\n" 10563 10564.LstrNewline: 10565 .asciz "\n" 10566.LstrSqueak: 10567 .asciz "<%d>" 10568.LstrPrintHex: 10569 .asciz "<0x%x>" 10570.LstrPrintLong: 10571 .asciz "<%lld>" 10572 10573