/* InterpAsm-armv5te-vfp.S revision 919eb063ce4542d3698e10e20aba9a2dfbdd0f82 */
1/* 2 * This file was generated automatically by gen-mterp.py for 'armv5te-vfp'. 3 * 4 * --> DO NOT EDIT <-- 5 */ 6 7/* File: armv5te/header.S */ 8/* 9 * Copyright (C) 2008 The Android Open Source Project 10 * 11 * Licensed under the Apache License, Version 2.0 (the "License"); 12 * you may not use this file except in compliance with the License. 13 * You may obtain a copy of the License at 14 * 15 * http://www.apache.org/licenses/LICENSE-2.0 16 * 17 * Unless required by applicable law or agreed to in writing, software 18 * distributed under the License is distributed on an "AS IS" BASIS, 19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 20 * See the License for the specific language governing permissions and 21 * limitations under the License. 22 */ 23 24/* 25 * ARMv5 definitions and declarations. 26 */ 27 28/* 29ARM EABI general notes: 30 31r0-r3 hold first 4 args to a method; they are not preserved across method calls 32r4-r8 are available for general use 33r9 is given special treatment in some situations, but not for us 34r10 (sl) seems to be generally available 35r11 (fp) is used by gcc (unless -fomit-frame-pointer is set) 36r12 (ip) is scratch -- not preserved across method calls 37r13 (sp) should be managed carefully in case a signal arrives 38r14 (lr) must be preserved 39r15 (pc) can be tinkered with directly 40 41r0 holds returns of <= 4 bytes 42r0-r1 hold returns of 8 bytes, low word in r0 43 44Callee must save/restore r4+ (except r12) if it modifies them. If VFP 45is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved, 46s0-s15 (d0-d7, q0-a3) do not need to be. 47 48Stack is "full descending". Only the arguments that don't fit in the first 4 49registers are placed on the stack. "sp" points at the first stacked argument 50(i.e. the 5th arg). 51 52VFP: single-precision results in s0, double-precision results in d0. 
53 54In the EABI, "sp" must be 64-bit aligned on entry to a function, and any 5564-bit quantities (long long, double) must be 64-bit aligned. 56*/ 57 58/* 59Mterp and ARM notes: 60 61The following registers have fixed assignments: 62 63 reg nick purpose 64 r4 rPC interpreted program counter, used for fetching instructions 65 r5 rFP interpreted frame pointer, used for accessing locals and args 66 r6 rGLUE MterpGlue pointer 67 r7 rINST first 16-bit code unit of current instruction 68 r8 rIBASE interpreted instruction base pointer, used for computed goto 69 70Macros are provided for common operations. Each macro MUST emit only 71one instruction to make instruction-counting easier. They MUST NOT alter 72unspecified registers or condition codes. 73*/ 74 75/* single-purpose registers, given names for clarity */ 76#define rPC r4 77#define rFP r5 78#define rGLUE r6 79#define rINST r7 80#define rIBASE r8 81 82/* save/restore the PC and/or FP from the glue struct */ 83#define LOAD_PC_FROM_GLUE() ldr rPC, [rGLUE, #offGlue_pc] 84#define SAVE_PC_TO_GLUE() str rPC, [rGLUE, #offGlue_pc] 85#define LOAD_FP_FROM_GLUE() ldr rFP, [rGLUE, #offGlue_fp] 86#define SAVE_FP_TO_GLUE() str rFP, [rGLUE, #offGlue_fp] 87#define LOAD_PC_FP_FROM_GLUE() ldmia rGLUE, {rPC, rFP} 88#define SAVE_PC_FP_TO_GLUE() stmia rGLUE, {rPC, rFP} 89 90/* 91 * "export" the PC to the stack frame, f/b/o future exception objects. Must 92 * be done *before* something calls dvmThrowException. 93 * 94 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e. 95 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc) 96 * 97 * It's okay to do this more than once. 98 */ 99#define EXPORT_PC() \ 100 str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)] 101 102/* 103 * Given a frame pointer, find the stack save area. 104 * 105 * In C this is "((StackSaveArea*)(_fp) -1)". 
106 */ 107#define SAVEAREA_FROM_FP(_reg, _fpreg) \ 108 sub _reg, _fpreg, #sizeofStackSaveArea 109 110/* 111 * Fetch the next instruction from rPC into rINST. Does not advance rPC. 112 */ 113#define FETCH_INST() ldrh rINST, [rPC] 114 115/* 116 * Fetch the next instruction from the specified offset. Advances rPC 117 * to point to the next instruction. "_count" is in 16-bit code units. 118 * 119 * Because of the limited size of immediate constants on ARM, this is only 120 * suitable for small forward movements (i.e. don't try to implement "goto" 121 * with this). 122 * 123 * This must come AFTER anything that can throw an exception, or the 124 * exception catch may miss. (This also implies that it must come after 125 * EXPORT_PC().) 126 */ 127#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]! 128 129/* 130 * The operation performed here is similar to FETCH_ADVANCE_INST, except the 131 * src and dest registers are parameterized (not hard-wired to rPC and rINST). 132 */ 133#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \ 134 ldrh _dreg, [_sreg, #(_count*2)]! 135 136/* 137 * Fetch the next instruction from an offset specified by _reg. Updates 138 * rPC to point to the next instruction. "_reg" must specify the distance 139 * in bytes, *not* 16-bit code units, and may be a signed value. 140 * 141 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the 142 * bits that hold the shift distance are used for the half/byte/sign flags. 143 * In some cases we can pre-double _reg for free, so we require a byte offset 144 * here. 145 */ 146#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]! 147 148/* 149 * Fetch a half-word code unit from an offset past the current PC. The 150 * "_count" value is in 16-bit code units. Does not advance rPC. 151 * 152 * The "_S" variant works the same but treats the value as signed. 
153 */ 154#define FETCH(_reg, _count) ldrh _reg, [rPC, #(_count*2)] 155#define FETCH_S(_reg, _count) ldrsh _reg, [rPC, #(_count*2)] 156 157/* 158 * Fetch one byte from an offset past the current PC. Pass in the same 159 * "_count" as you would for FETCH, and an additional 0/1 indicating which 160 * byte of the halfword you want (lo/hi). 161 */ 162#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #(_count*2+_byte)] 163 164/* 165 * Put the instruction's opcode field into the specified register. 166 */ 167#define GET_INST_OPCODE(_reg) and _reg, rINST, #255 168 169/* 170 * Put the prefetched instruction's opcode field into the specified register. 171 */ 172#define GET_PREFETCHED_OPCODE(_oreg, _ireg) and _oreg, _ireg, #255 173 174/* 175 * Begin executing the opcode in _reg. Because this only jumps within the 176 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork. 177 */ 178#define GOTO_OPCODE(_reg) add pc, rIBASE, _reg, lsl #6 179#define GOTO_OPCODE_IFEQ(_reg) addeq pc, rIBASE, _reg, lsl #6 180#define GOTO_OPCODE_IFNE(_reg) addne pc, rIBASE, _reg, lsl #6 181 182/* 183 * Get/set the 32-bit value from a Dalvik register. 184 */ 185#define GET_VREG(_reg, _vreg) ldr _reg, [rFP, _vreg, lsl #2] 186#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2] 187 188#if defined(WITH_JIT) 189#define GET_JIT_PROF_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitProfTable] 190#define GET_JIT_THRESHOLD(_reg) ldr _reg,[rGLUE,#offGlue_jitThreshold] 191#endif 192 193/* 194 * Convert a virtual register index into an address. 195 */ 196#define VREG_INDEX_TO_ADDR(_reg, _vreg) \ 197 add _reg, rFP, _vreg, lsl #2 198 199/* 200 * This is a #include, not a %include, because we want the C pre-processor 201 * to expand the macros into assembler assignment statements. 
202 */ 203#include "../common/asm-constants.h" 204 205#if defined(WITH_JIT) 206#include "../common/jit-config.h" 207#endif 208 209/* File: armv5te/platform.S */ 210/* 211 * =========================================================================== 212 * CPU-version-specific defines 213 * =========================================================================== 214 */ 215 216/* 217 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5. Essentially a 218 * one-way branch. 219 * 220 * May modify IP. Does not modify LR. 221 */ 222.macro LDR_PC source 223 ldr pc, \source 224.endm 225 226/* 227 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5. 228 * Jump to subroutine. 229 * 230 * May modify IP and LR. 231 */ 232.macro LDR_PC_LR source 233 mov lr, pc 234 ldr pc, \source 235.endm 236 237/* 238 * Macro for "LDMFD SP!, {...regs...,PC}". 239 * 240 * May modify IP and LR. 241 */ 242.macro LDMFD_PC regs 243 ldmfd sp!, {\regs,pc} 244.endm 245 246/* 247 * Macro for data memory barrier; not meaningful pre-ARMv6K. 248 */ 249.macro SMP_DMB 250.endm 251 252/* File: armv5te/entry.S */ 253/* 254 * Copyright (C) 2008 The Android Open Source Project 255 * 256 * Licensed under the Apache License, Version 2.0 (the "License"); 257 * you may not use this file except in compliance with the License. 258 * You may obtain a copy of the License at 259 * 260 * http://www.apache.org/licenses/LICENSE-2.0 261 * 262 * Unless required by applicable law or agreed to in writing, software 263 * distributed under the License is distributed on an "AS IS" BASIS, 264 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 265 * See the License for the specific language governing permissions and 266 * limitations under the License. 267 */ 268/* 269 * Interpreter entry point. 
270 */ 271 272/* 273 * We don't have formal stack frames, so gdb scans upward in the code 274 * to find the start of the function (a label with the %function type), 275 * and then looks at the next few instructions to figure out what 276 * got pushed onto the stack. From this it figures out how to restore 277 * the registers, including PC, for the previous stack frame. If gdb 278 * sees a non-function label, it stops scanning, so either we need to 279 * have nothing but assembler-local labels between the entry point and 280 * the break, or we need to fake it out. 281 * 282 * When this is defined, we add some stuff to make gdb less confused. 283 */ 284#define ASSIST_DEBUGGER 1 285 286 .text 287 .align 2 288 .global dvmMterpStdRun 289 .type dvmMterpStdRun, %function 290 291/* 292 * On entry: 293 * r0 MterpGlue* glue 294 * 295 * This function returns a boolean "changeInterp" value. The return comes 296 * via a call to dvmMterpStdBail(). 297 */ 298dvmMterpStdRun: 299#define MTERP_ENTRY1 \ 300 .save {r4-r10,fp,lr}; \ 301 stmfd sp!, {r4-r10,fp,lr} @ save 9 regs 302#define MTERP_ENTRY2 \ 303 .pad #4; \ 304 sub sp, sp, #4 @ align 64 305 306 .fnstart 307 MTERP_ENTRY1 308 MTERP_ENTRY2 309 310 /* save stack pointer, add magic word for debuggerd */ 311 str sp, [r0, #offGlue_bailPtr] @ save SP for eventual return 312 313 /* set up "named" registers, figure out entry point */ 314 mov rGLUE, r0 @ set rGLUE 315 ldr r1, [r0, #offGlue_entryPoint] @ enum is 4 bytes in aapcs-EABI 316 LOAD_PC_FP_FROM_GLUE() @ load rPC and rFP from "glue" 317 adr rIBASE, dvmAsmInstructionStart @ set rIBASE 318 cmp r1, #kInterpEntryInstr @ usual case? 
319 bne .Lnot_instr @ no, handle it 320 321#if defined(WITH_JIT) 322.LentryInstr: 323 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 324 /* Entry is always a possible trace start */ 325 GET_JIT_PROF_TABLE(r0) 326 FETCH_INST() 327 mov r1, #0 @ prepare the value for the new state 328 str r1, [r10, #offThread_inJitCodeCache] @ back to the interp land 329 cmp r0,#0 330 bne common_updateProfile 331 GET_INST_OPCODE(ip) 332 GOTO_OPCODE(ip) 333#else 334 /* start executing the instruction at rPC */ 335 FETCH_INST() @ load rINST from rPC 336 GET_INST_OPCODE(ip) @ extract opcode from rINST 337 GOTO_OPCODE(ip) @ jump to next instruction 338#endif 339 340.Lnot_instr: 341 cmp r1, #kInterpEntryReturn @ were we returning from a method? 342 beq common_returnFromMethod 343 344.Lnot_return: 345 cmp r1, #kInterpEntryThrow @ were we throwing an exception? 346 beq common_exceptionThrown 347 348#if defined(WITH_JIT) 349.Lnot_throw: 350 ldr r10,[rGLUE, #offGlue_jitResumeNPC] 351 ldr r2,[rGLUE, #offGlue_jitResumeDPC] 352 cmp r1, #kInterpEntryResume @ resuming after Jit single-step? 353 bne .Lbad_arg 354 cmp rPC,r2 355 bne .LentryInstr @ must have branched, don't resume 356#if defined(WITH_SELF_VERIFICATION) 357 @ glue->entryPoint will be set in dvmSelfVerificationSaveState 358 b jitSVShadowRunStart @ re-enter the translation after the 359 @ single-stepped instruction 360 @noreturn 361#endif 362 mov r1, #kInterpEntryInstr 363 str r1, [rGLUE, #offGlue_entryPoint] 364 bx r10 @ re-enter the translation 365#endif 366 367.Lbad_arg: 368 ldr r0, strBadEntryPoint 369 @ r1 holds value of entryPoint 370 bl printf 371 bl dvmAbort 372 .fnend 373 374 375 .global dvmMterpStdBail 376 .type dvmMterpStdBail, %function 377 378/* 379 * Restore the stack pointer and PC from the save point established on entry. 380 * This is essentially the same as a longjmp, but should be cheaper. The 381 * last instruction causes us to return to whoever called dvmMterpStdRun. 
382 * 383 * We pushed some registers on the stack in dvmMterpStdRun, then saved 384 * SP and LR. Here we restore SP, restore the registers, and then restore 385 * LR to PC. 386 * 387 * On entry: 388 * r0 MterpGlue* glue 389 * r1 bool changeInterp 390 */ 391dvmMterpStdBail: 392 ldr sp, [r0, #offGlue_bailPtr] @ sp<- saved SP 393 mov r0, r1 @ return the changeInterp value 394 add sp, sp, #4 @ un-align 64 395 LDMFD_PC "r4-r10,fp" @ restore 9 regs and return 396 397 398/* 399 * String references. 400 */ 401strBadEntryPoint: 402 .word .LstrBadEntryPoint 403 404 405 .global dvmAsmInstructionStart 406 .type dvmAsmInstructionStart, %function 407dvmAsmInstructionStart = .L_OP_NOP 408 .text 409 410/* ------------------------------ */ 411 .balign 64 412.L_OP_NOP: /* 0x00 */ 413/* File: armv5te/OP_NOP.S */ 414 FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST 415 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 416 GOTO_OPCODE(ip) @ execute it 417 418#ifdef ASSIST_DEBUGGER 419 /* insert fake function header to help gdb find the stack frame */ 420 .type dalvik_inst, %function 421dalvik_inst: 422 .fnstart 423 MTERP_ENTRY1 424 MTERP_ENTRY2 425 .fnend 426#endif 427 428/* ------------------------------ */ 429 .balign 64 430.L_OP_MOVE: /* 0x01 */ 431/* File: armv5te/OP_MOVE.S */ 432 /* for move, move-object, long-to-int */ 433 /* op vA, vB */ 434 mov r1, rINST, lsr #12 @ r1<- B from 15:12 435 mov r0, rINST, lsr #8 @ r0<- A from 11:8 436 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 437 GET_VREG(r2, r1) @ r2<- fp[B] 438 and r0, r0, #15 439 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 440 SET_VREG(r2, r0) @ fp[A]<- r2 441 GOTO_OPCODE(ip) @ execute next instruction 442 443/* ------------------------------ */ 444 .balign 64 445.L_OP_MOVE_FROM16: /* 0x02 */ 446/* File: armv5te/OP_MOVE_FROM16.S */ 447 /* for: move/from16, move-object/from16 */ 448 /* op vAA, vBBBB */ 449 FETCH(r1, 1) @ r1<- BBBB 450 mov r0, rINST, lsr #8 @ r0<- AA 451 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 
452 GET_VREG(r2, r1) @ r2<- fp[BBBB] 453 GET_INST_OPCODE(ip) @ extract opcode from rINST 454 SET_VREG(r2, r0) @ fp[AA]<- r2 455 GOTO_OPCODE(ip) @ jump to next instruction 456 457/* ------------------------------ */ 458 .balign 64 459.L_OP_MOVE_16: /* 0x03 */ 460/* File: armv5te/OP_MOVE_16.S */ 461 /* for: move/16, move-object/16 */ 462 /* op vAAAA, vBBBB */ 463 FETCH(r1, 2) @ r1<- BBBB 464 FETCH(r0, 1) @ r0<- AAAA 465 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 466 GET_VREG(r2, r1) @ r2<- fp[BBBB] 467 GET_INST_OPCODE(ip) @ extract opcode from rINST 468 SET_VREG(r2, r0) @ fp[AAAA]<- r2 469 GOTO_OPCODE(ip) @ jump to next instruction 470 471/* ------------------------------ */ 472 .balign 64 473.L_OP_MOVE_WIDE: /* 0x04 */ 474/* File: armv5te/OP_MOVE_WIDE.S */ 475 /* move-wide vA, vB */ 476 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 477 mov r2, rINST, lsr #8 @ r2<- A(+) 478 mov r3, rINST, lsr #12 @ r3<- B 479 and r2, r2, #15 480 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 481 add r2, rFP, r2, lsl #2 @ r2<- &fp[A] 482 ldmia r3, {r0-r1} @ r0/r1<- fp[B] 483 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 484 GET_INST_OPCODE(ip) @ extract opcode from rINST 485 stmia r2, {r0-r1} @ fp[A]<- r0/r1 486 GOTO_OPCODE(ip) @ jump to next instruction 487 488/* ------------------------------ */ 489 .balign 64 490.L_OP_MOVE_WIDE_FROM16: /* 0x05 */ 491/* File: armv5te/OP_MOVE_WIDE_FROM16.S */ 492 /* move-wide/from16 vAA, vBBBB */ 493 /* NOTE: regs can overlap, e.g. 
"move v6,v7" or "move v7,v6" */ 494 FETCH(r3, 1) @ r3<- BBBB 495 mov r2, rINST, lsr #8 @ r2<- AA 496 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 497 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 498 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 499 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 500 GET_INST_OPCODE(ip) @ extract opcode from rINST 501 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 502 GOTO_OPCODE(ip) @ jump to next instruction 503 504/* ------------------------------ */ 505 .balign 64 506.L_OP_MOVE_WIDE_16: /* 0x06 */ 507/* File: armv5te/OP_MOVE_WIDE_16.S */ 508 /* move-wide/16 vAAAA, vBBBB */ 509 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 510 FETCH(r3, 2) @ r3<- BBBB 511 FETCH(r2, 1) @ r2<- AAAA 512 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 513 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA] 514 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 515 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 516 GET_INST_OPCODE(ip) @ extract opcode from rINST 517 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1 518 GOTO_OPCODE(ip) @ jump to next instruction 519 520/* ------------------------------ */ 521 .balign 64 522.L_OP_MOVE_OBJECT: /* 0x07 */ 523/* File: armv5te/OP_MOVE_OBJECT.S */ 524/* File: armv5te/OP_MOVE.S */ 525 /* for move, move-object, long-to-int */ 526 /* op vA, vB */ 527 mov r1, rINST, lsr #12 @ r1<- B from 15:12 528 mov r0, rINST, lsr #8 @ r0<- A from 11:8 529 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 530 GET_VREG(r2, r1) @ r2<- fp[B] 531 and r0, r0, #15 532 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 533 SET_VREG(r2, r0) @ fp[A]<- r2 534 GOTO_OPCODE(ip) @ execute next instruction 535 536 537/* ------------------------------ */ 538 .balign 64 539.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */ 540/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */ 541/* File: armv5te/OP_MOVE_FROM16.S */ 542 /* for: move/from16, move-object/from16 */ 543 /* op vAA, vBBBB */ 544 FETCH(r1, 1) @ r1<- BBBB 545 mov r0, rINST, lsr #8 @ r0<- AA 546 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 547 GET_VREG(r2, r1) @ r2<- 
fp[BBBB] 548 GET_INST_OPCODE(ip) @ extract opcode from rINST 549 SET_VREG(r2, r0) @ fp[AA]<- r2 550 GOTO_OPCODE(ip) @ jump to next instruction 551 552 553/* ------------------------------ */ 554 .balign 64 555.L_OP_MOVE_OBJECT_16: /* 0x09 */ 556/* File: armv5te/OP_MOVE_OBJECT_16.S */ 557/* File: armv5te/OP_MOVE_16.S */ 558 /* for: move/16, move-object/16 */ 559 /* op vAAAA, vBBBB */ 560 FETCH(r1, 2) @ r1<- BBBB 561 FETCH(r0, 1) @ r0<- AAAA 562 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 563 GET_VREG(r2, r1) @ r2<- fp[BBBB] 564 GET_INST_OPCODE(ip) @ extract opcode from rINST 565 SET_VREG(r2, r0) @ fp[AAAA]<- r2 566 GOTO_OPCODE(ip) @ jump to next instruction 567 568 569/* ------------------------------ */ 570 .balign 64 571.L_OP_MOVE_RESULT: /* 0x0a */ 572/* File: armv5te/OP_MOVE_RESULT.S */ 573 /* for: move-result, move-result-object */ 574 /* op vAA */ 575 mov r2, rINST, lsr #8 @ r2<- AA 576 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 577 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 578 GET_INST_OPCODE(ip) @ extract opcode from rINST 579 SET_VREG(r0, r2) @ fp[AA]<- r0 580 GOTO_OPCODE(ip) @ jump to next instruction 581 582/* ------------------------------ */ 583 .balign 64 584.L_OP_MOVE_RESULT_WIDE: /* 0x0b */ 585/* File: armv5te/OP_MOVE_RESULT_WIDE.S */ 586 /* move-result-wide vAA */ 587 mov r2, rINST, lsr #8 @ r2<- AA 588 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 589 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 590 ldmia r3, {r0-r1} @ r0/r1<- retval.j 591 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 592 GET_INST_OPCODE(ip) @ extract opcode from rINST 593 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 594 GOTO_OPCODE(ip) @ jump to next instruction 595 596/* ------------------------------ */ 597 .balign 64 598.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */ 599/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */ 600/* File: armv5te/OP_MOVE_RESULT.S */ 601 /* for: move-result, move-result-object */ 602 /* op vAA */ 603 mov r2, rINST, lsr #8 @ r2<- AA 604 
FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 605 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 606 GET_INST_OPCODE(ip) @ extract opcode from rINST 607 SET_VREG(r0, r2) @ fp[AA]<- r0 608 GOTO_OPCODE(ip) @ jump to next instruction 609 610 611/* ------------------------------ */ 612 .balign 64 613.L_OP_MOVE_EXCEPTION: /* 0x0d */ 614/* File: armv5te/OP_MOVE_EXCEPTION.S */ 615 /* move-exception vAA */ 616 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 617 mov r2, rINST, lsr #8 @ r2<- AA 618 ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass 619 mov r1, #0 @ r1<- 0 620 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 621 SET_VREG(r3, r2) @ fp[AA]<- exception obj 622 GET_INST_OPCODE(ip) @ extract opcode from rINST 623 str r1, [r0, #offThread_exception] @ dvmClearException bypass 624 GOTO_OPCODE(ip) @ jump to next instruction 625 626/* ------------------------------ */ 627 .balign 64 628.L_OP_RETURN_VOID: /* 0x0e */ 629/* File: armv5te/OP_RETURN_VOID.S */ 630 b common_returnFromMethod 631 632/* ------------------------------ */ 633 .balign 64 634.L_OP_RETURN: /* 0x0f */ 635/* File: armv5te/OP_RETURN.S */ 636 /* 637 * Return a 32-bit value. Copies the return value into the "glue" 638 * structure, then jumps to the return handler. 639 * 640 * for: return, return-object 641 */ 642 /* op vAA */ 643 mov r2, rINST, lsr #8 @ r2<- AA 644 GET_VREG(r0, r2) @ r0<- vAA 645 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 646 b common_returnFromMethod 647 648/* ------------------------------ */ 649 .balign 64 650.L_OP_RETURN_WIDE: /* 0x10 */ 651/* File: armv5te/OP_RETURN_WIDE.S */ 652 /* 653 * Return a 64-bit value. Copies the return value into the "glue" 654 * structure, then jumps to the return handler. 
655 */ 656 /* return-wide vAA */ 657 mov r2, rINST, lsr #8 @ r2<- AA 658 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 659 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 660 ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1 661 stmia r3, {r0-r1} @ retval<- r0/r1 662 b common_returnFromMethod 663 664/* ------------------------------ */ 665 .balign 64 666.L_OP_RETURN_OBJECT: /* 0x11 */ 667/* File: armv5te/OP_RETURN_OBJECT.S */ 668/* File: armv5te/OP_RETURN.S */ 669 /* 670 * Return a 32-bit value. Copies the return value into the "glue" 671 * structure, then jumps to the return handler. 672 * 673 * for: return, return-object 674 */ 675 /* op vAA */ 676 mov r2, rINST, lsr #8 @ r2<- AA 677 GET_VREG(r0, r2) @ r0<- vAA 678 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 679 b common_returnFromMethod 680 681 682/* ------------------------------ */ 683 .balign 64 684.L_OP_CONST_4: /* 0x12 */ 685/* File: armv5te/OP_CONST_4.S */ 686 /* const/4 vA, #+B */ 687 mov r1, rINST, lsl #16 @ r1<- Bxxx0000 688 mov r0, rINST, lsr #8 @ r0<- A+ 689 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 690 mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended) 691 and r0, r0, #15 692 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 693 SET_VREG(r1, r0) @ fp[A]<- r1 694 GOTO_OPCODE(ip) @ execute next instruction 695 696/* ------------------------------ */ 697 .balign 64 698.L_OP_CONST_16: /* 0x13 */ 699/* File: armv5te/OP_CONST_16.S */ 700 /* const/16 vAA, #+BBBB */ 701 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 702 mov r3, rINST, lsr #8 @ r3<- AA 703 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 704 SET_VREG(r0, r3) @ vAA<- r0 705 GET_INST_OPCODE(ip) @ extract opcode from rINST 706 GOTO_OPCODE(ip) @ jump to next instruction 707 708/* ------------------------------ */ 709 .balign 64 710.L_OP_CONST: /* 0x14 */ 711/* File: armv5te/OP_CONST.S */ 712 /* const vAA, #+BBBBbbbb */ 713 mov r3, rINST, lsr #8 @ r3<- AA 714 FETCH(r0, 1) @ r0<- bbbb (low) 715 FETCH(r1, 2) @ r1<- BBBB (high) 716 FETCH_ADVANCE_INST(3) @ 
advance rPC, load rINST 717 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 718 GET_INST_OPCODE(ip) @ extract opcode from rINST 719 SET_VREG(r0, r3) @ vAA<- r0 720 GOTO_OPCODE(ip) @ jump to next instruction 721 722/* ------------------------------ */ 723 .balign 64 724.L_OP_CONST_HIGH16: /* 0x15 */ 725/* File: armv5te/OP_CONST_HIGH16.S */ 726 /* const/high16 vAA, #+BBBB0000 */ 727 FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended) 728 mov r3, rINST, lsr #8 @ r3<- AA 729 mov r0, r0, lsl #16 @ r0<- BBBB0000 730 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 731 SET_VREG(r0, r3) @ vAA<- r0 732 GET_INST_OPCODE(ip) @ extract opcode from rINST 733 GOTO_OPCODE(ip) @ jump to next instruction 734 735/* ------------------------------ */ 736 .balign 64 737.L_OP_CONST_WIDE_16: /* 0x16 */ 738/* File: armv5te/OP_CONST_WIDE_16.S */ 739 /* const-wide/16 vAA, #+BBBB */ 740 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 741 mov r3, rINST, lsr #8 @ r3<- AA 742 mov r1, r0, asr #31 @ r1<- ssssssss 743 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 744 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 745 GET_INST_OPCODE(ip) @ extract opcode from rINST 746 stmia r3, {r0-r1} @ vAA<- r0/r1 747 GOTO_OPCODE(ip) @ jump to next instruction 748 749/* ------------------------------ */ 750 .balign 64 751.L_OP_CONST_WIDE_32: /* 0x17 */ 752/* File: armv5te/OP_CONST_WIDE_32.S */ 753 /* const-wide/32 vAA, #+BBBBbbbb */ 754 FETCH(r0, 1) @ r0<- 0000bbbb (low) 755 mov r3, rINST, lsr #8 @ r3<- AA 756 FETCH_S(r2, 2) @ r2<- ssssBBBB (high) 757 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 758 orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb 759 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 760 mov r1, r0, asr #31 @ r1<- ssssssss 761 GET_INST_OPCODE(ip) @ extract opcode from rINST 762 stmia r3, {r0-r1} @ vAA<- r0/r1 763 GOTO_OPCODE(ip) @ jump to next instruction 764 765/* ------------------------------ */ 766 .balign 64 767.L_OP_CONST_WIDE: /* 0x18 */ 768/* File: armv5te/OP_CONST_WIDE.S */ 769 /* const-wide vAA, #+HHHHhhhhBBBBbbbb */ 770 
FETCH(r0, 1) @ r0<- bbbb (low) 771 FETCH(r1, 2) @ r1<- BBBB (low middle) 772 FETCH(r2, 3) @ r2<- hhhh (high middle) 773 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word) 774 FETCH(r3, 4) @ r3<- HHHH (high) 775 mov r9, rINST, lsr #8 @ r9<- AA 776 orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word) 777 FETCH_ADVANCE_INST(5) @ advance rPC, load rINST 778 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 779 GET_INST_OPCODE(ip) @ extract opcode from rINST 780 stmia r9, {r0-r1} @ vAA<- r0/r1 781 GOTO_OPCODE(ip) @ jump to next instruction 782 783/* ------------------------------ */ 784 .balign 64 785.L_OP_CONST_WIDE_HIGH16: /* 0x19 */ 786/* File: armv5te/OP_CONST_WIDE_HIGH16.S */ 787 /* const-wide/high16 vAA, #+BBBB000000000000 */ 788 FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended) 789 mov r3, rINST, lsr #8 @ r3<- AA 790 mov r0, #0 @ r0<- 00000000 791 mov r1, r1, lsl #16 @ r1<- BBBB0000 792 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 793 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 794 GET_INST_OPCODE(ip) @ extract opcode from rINST 795 stmia r3, {r0-r1} @ vAA<- r0/r1 796 GOTO_OPCODE(ip) @ jump to next instruction 797 798/* ------------------------------ */ 799 .balign 64 800.L_OP_CONST_STRING: /* 0x1a */ 801/* File: armv5te/OP_CONST_STRING.S */ 802 /* const/string vAA, String@BBBB */ 803 FETCH(r1, 1) @ r1<- BBBB 804 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 805 mov r9, rINST, lsr #8 @ r9<- AA 806 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 807 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 808 cmp r0, #0 @ not yet resolved? 
809 beq .LOP_CONST_STRING_resolve 810 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 811 GET_INST_OPCODE(ip) @ extract opcode from rINST 812 SET_VREG(r0, r9) @ vAA<- r0 813 GOTO_OPCODE(ip) @ jump to next instruction 814 815/* ------------------------------ */ 816 .balign 64 817.L_OP_CONST_STRING_JUMBO: /* 0x1b */ 818/* File: armv5te/OP_CONST_STRING_JUMBO.S */ 819 /* const/string vAA, String@BBBBBBBB */ 820 FETCH(r0, 1) @ r0<- bbbb (low) 821 FETCH(r1, 2) @ r1<- BBBB (high) 822 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 823 mov r9, rINST, lsr #8 @ r9<- AA 824 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 825 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 826 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 827 cmp r0, #0 828 beq .LOP_CONST_STRING_JUMBO_resolve 829 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 830 GET_INST_OPCODE(ip) @ extract opcode from rINST 831 SET_VREG(r0, r9) @ vAA<- r0 832 GOTO_OPCODE(ip) @ jump to next instruction 833 834/* ------------------------------ */ 835 .balign 64 836.L_OP_CONST_CLASS: /* 0x1c */ 837/* File: armv5te/OP_CONST_CLASS.S */ 838 /* const/class vAA, Class@BBBB */ 839 FETCH(r1, 1) @ r1<- BBBB 840 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 841 mov r9, rINST, lsr #8 @ r9<- AA 842 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses 843 ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB] 844 cmp r0, #0 @ not yet resolved? 845 beq .LOP_CONST_CLASS_resolve 846 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 847 GET_INST_OPCODE(ip) @ extract opcode from rINST 848 SET_VREG(r0, r9) @ vAA<- r0 849 GOTO_OPCODE(ip) @ jump to next instruction 850 851/* ------------------------------ */ 852 .balign 64 853.L_OP_MONITOR_ENTER: /* 0x1d */ 854/* File: armv5te/OP_MONITOR_ENTER.S */ 855 /* 856 * Synchronize on an object. 
 */
    /* monitor-enter vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
    beq     common_errNullObject        @ null object, throw an exception
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      dvmLockObject               @ call(self, obj)
#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r1, [r0, #offThread_exception] @ check for exception
    cmp     r1, #0
    bne     common_exceptionThrown      @ exception raised, bail out
#endif
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_EXIT: /* 0x1e */
/* File: armv5te/OP_MONITOR_EXIT.S */
    /*
     * Unlock an object.
     *
     * Exceptions that occur when unlocking a monitor need to appear as
     * if they happened at the following instruction.  See the Dalvik
     * instruction spec.
     */
    /* monitor-exit vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    EXPORT_PC()                         @ before fetch: export the PC
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    cmp     r1, #0                      @ null object?
    beq     1f                          @ yes
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
    cmp     r0, #0                      @ failed?
    FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
    beq     common_exceptionThrown      @ yes, exception is pending
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
1:
    FETCH_ADVANCE_INST(1)               @ advance before throw
    b       common_errNullObject

/* ------------------------------ */
    .balign 64
.L_OP_CHECK_CAST: /* 0x1f */
/* File: armv5te/OP_CHECK_CAST.S */
    /*
     * Check to see if a cast from one class to another is allowed.
     */
    /* check-cast vAA, class@BBBB */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r2, 1)                        @ r2<- BBBB
    GET_VREG(r9, r3)                    @ r9<- object
    ldr     r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex
    cmp     r9, #0                      @ is object null?
    ldr     r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses
    beq     .LOP_CHECK_CAST_okay        @ null obj, cast always succeeds
    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_CHECK_CAST_resolve     @ not resolved, do it now
.LOP_CHECK_CAST_resolved:
    cmp     r0, r1                      @ same class (trivial success)?
    bne     .LOP_CHECK_CAST_fullcheck   @ no, do full check
.LOP_CHECK_CAST_okay:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INSTANCE_OF: /* 0x20 */
/* File: armv5te/OP_INSTANCE_OF.S */
    /*
     * Check to see if an object reference is an instance of a class.
     *
     * Most common situation is a non-null object, being compared against
     * an already-resolved class.
     */
    /* instance-of vA, vB, class@CCCC */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    and     r9, r9, #15                 @ r9<- A
    cmp     r0, #0                      @ is object null?
    ldr     r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex
    beq     .LOP_INSTANCE_OF_store      @ null obj, not an instance, store r0
    FETCH(r3, 1)                        @ r3<- CCCC
    ldr     r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses
    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_INSTANCE_OF_resolve    @ not resolved, do it now
.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
    cmp     r0, r1                      @ same class (trivial success)?
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

/* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv5te/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    mov     r1, rINST, lsr #12          @ r1<- B
    mov     r2, rINST, lsr #8           @ r2<- A+
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    and     r2, r2, #15                 @ r2<- A
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length] @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vA<- length
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved: @ r0=class
    ldrb    r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

/* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


/* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /* fill-array-data vAA, +BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
    EXPORT_PC();
    bl      dvmInterpHandleFillArrayData@ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ exception handler can throw
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception] @ thread->exception<- obj
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    mov     r9, r9, lsl #1              @ r9<- byte offset
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: arm-vfp/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_FLOAT_finish      @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: arm-vfp/OP_CMPG_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_FLOAT_finish      @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: arm-vfp/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_DOUBLE_finish     @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: arm-vfp/OP_CMPG_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_DOUBLE_finish     @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

/* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv5te/OP_IF_EQ.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv5te/OP_IF_NE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: armv5te/OP_IF_LT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bge  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: armv5te/OP_IF_GE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    blt  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: armv5te/OP_IF_GT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    ble  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LE: /* 0x37 */
/* File: armv5te/OP_IF_LE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bgt  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_EQZ: /* 0x38 */
/* File: armv5te/OP_IF_EQZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bne  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_NEZ: /* 0x39 */
/* File: armv5te/OP_IF_NEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    beq  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LTZ: /* 0x3a */
/* File: armv5te/OP_IF_LTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bge  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GEZ: /* 0x3b */
/* File: armv5te/OP_IF_GEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    blt  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GTZ: /* 0x3c */
/* File: armv5te/OP_IF_GTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    ble  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LEZ: /* 0x3d */
/* File: armv5te/OP_IF_LEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bgt  1f                             @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_3E: /* 0x3e */
/* File: armv5te/OP_UNUSED_3E.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_3F: /* 0x3f */
/* File: armv5te/OP_UNUSED_3F.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_40: /* 0x40 */
/* File: armv5te/OP_UNUSED_40.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_41: /* 0x41 */
/* File: armv5te/OP_UNUSED_41.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_42: /* 0x42 */
/* File: armv5te/OP_UNUSED_42.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_43: /* 0x43 */
/* File: armv5te/OP_UNUSED_43.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_AGET: /* 0x44 */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: armv5te/OP_AGET_WIDE.S */
    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
2011 beq common_errNullObject @ yes, bail 2012 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2013 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2014 cmp r1, r3 @ compare unsigned index, length 2015 bcc .LOP_AGET_WIDE_finish @ okay, continue below 2016 b common_errArrayIndex @ index >= length, bail 2017 @ May want to swap the order of these two branches depending on how the 2018 @ branch prediction (if any) handles conditional forward branches vs. 2019 @ unconditional forward branches. 2020 2021/* ------------------------------ */ 2022 .balign 64 2023.L_OP_AGET_OBJECT: /* 0x46 */ 2024/* File: armv5te/OP_AGET_OBJECT.S */ 2025/* File: armv5te/OP_AGET.S */ 2026 /* 2027 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2028 * 2029 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2030 * instructions. We use a pair of FETCH_Bs instead. 2031 * 2032 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2033 */ 2034 /* op vAA, vBB, vCC */ 2035 FETCH_B(r2, 1, 0) @ r2<- BB 2036 mov r9, rINST, lsr #8 @ r9<- AA 2037 FETCH_B(r3, 1, 1) @ r3<- CC 2038 GET_VREG(r0, r2) @ r0<- vBB (array object) 2039 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2040 cmp r0, #0 @ null array object? 2041 beq common_errNullObject @ yes, bail 2042 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2043 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2044 cmp r1, r3 @ compare unsigned index, length 2045 bcs common_errArrayIndex @ index >= length, bail 2046 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2047 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2048 GET_INST_OPCODE(ip) @ extract opcode from rINST 2049 SET_VREG(r2, r9) @ vAA<- r2 2050 GOTO_OPCODE(ip) @ jump to next instruction 2051 2052 2053/* ------------------------------ */ 2054 .balign 64 2055.L_OP_AGET_BOOLEAN: /* 0x47 */ 2056/* File: armv5te/OP_AGET_BOOLEAN.S */ 2057/* File: armv5te/OP_AGET.S */ 2058 /* 2059 * Array get, 32 bits or less. vAA <- vBB[vCC]. 
2060 * 2061 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2062 * instructions. We use a pair of FETCH_Bs instead. 2063 * 2064 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2065 */ 2066 /* op vAA, vBB, vCC */ 2067 FETCH_B(r2, 1, 0) @ r2<- BB 2068 mov r9, rINST, lsr #8 @ r9<- AA 2069 FETCH_B(r3, 1, 1) @ r3<- CC 2070 GET_VREG(r0, r2) @ r0<- vBB (array object) 2071 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2072 cmp r0, #0 @ null array object? 2073 beq common_errNullObject @ yes, bail 2074 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2075 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2076 cmp r1, r3 @ compare unsigned index, length 2077 bcs common_errArrayIndex @ index >= length, bail 2078 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2079 ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2080 GET_INST_OPCODE(ip) @ extract opcode from rINST 2081 SET_VREG(r2, r9) @ vAA<- r2 2082 GOTO_OPCODE(ip) @ jump to next instruction 2083 2084 2085/* ------------------------------ */ 2086 .balign 64 2087.L_OP_AGET_BYTE: /* 0x48 */ 2088/* File: armv5te/OP_AGET_BYTE.S */ 2089/* File: armv5te/OP_AGET.S */ 2090 /* 2091 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2092 * 2093 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2094 * instructions. We use a pair of FETCH_Bs instead. 2095 * 2096 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2097 */ 2098 /* op vAA, vBB, vCC */ 2099 FETCH_B(r2, 1, 0) @ r2<- BB 2100 mov r9, rINST, lsr #8 @ r9<- AA 2101 FETCH_B(r3, 1, 1) @ r3<- CC 2102 GET_VREG(r0, r2) @ r0<- vBB (array object) 2103 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2104 cmp r0, #0 @ null array object? 
2105 beq common_errNullObject @ yes, bail 2106 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2107 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2108 cmp r1, r3 @ compare unsigned index, length 2109 bcs common_errArrayIndex @ index >= length, bail 2110 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2111 ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2112 GET_INST_OPCODE(ip) @ extract opcode from rINST 2113 SET_VREG(r2, r9) @ vAA<- r2 2114 GOTO_OPCODE(ip) @ jump to next instruction 2115 2116 2117/* ------------------------------ */ 2118 .balign 64 2119.L_OP_AGET_CHAR: /* 0x49 */ 2120/* File: armv5te/OP_AGET_CHAR.S */ 2121/* File: armv5te/OP_AGET.S */ 2122 /* 2123 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2124 * 2125 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2126 * instructions. We use a pair of FETCH_Bs instead. 2127 * 2128 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2129 */ 2130 /* op vAA, vBB, vCC */ 2131 FETCH_B(r2, 1, 0) @ r2<- BB 2132 mov r9, rINST, lsr #8 @ r9<- AA 2133 FETCH_B(r3, 1, 1) @ r3<- CC 2134 GET_VREG(r0, r2) @ r0<- vBB (array object) 2135 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2136 cmp r0, #0 @ null array object? 2137 beq common_errNullObject @ yes, bail 2138 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2139 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2140 cmp r1, r3 @ compare unsigned index, length 2141 bcs common_errArrayIndex @ index >= length, bail 2142 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2143 ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2144 GET_INST_OPCODE(ip) @ extract opcode from rINST 2145 SET_VREG(r2, r9) @ vAA<- r2 2146 GOTO_OPCODE(ip) @ jump to next instruction 2147 2148 2149/* ------------------------------ */ 2150 .balign 64 2151.L_OP_AGET_SHORT: /* 0x4a */ 2152/* File: armv5te/OP_AGET_SHORT.S */ 2153/* File: armv5te/OP_AGET.S */ 2154 /* 2155 * Array get, 32 bits or less. vAA <- vBB[vCC]. 
2156 * 2157 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2158 * instructions. We use a pair of FETCH_Bs instead. 2159 * 2160 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2161 */ 2162 /* op vAA, vBB, vCC */ 2163 FETCH_B(r2, 1, 0) @ r2<- BB 2164 mov r9, rINST, lsr #8 @ r9<- AA 2165 FETCH_B(r3, 1, 1) @ r3<- CC 2166 GET_VREG(r0, r2) @ r0<- vBB (array object) 2167 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2168 cmp r0, #0 @ null array object? 2169 beq common_errNullObject @ yes, bail 2170 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2171 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2172 cmp r1, r3 @ compare unsigned index, length 2173 bcs common_errArrayIndex @ index >= length, bail 2174 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2175 ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2176 GET_INST_OPCODE(ip) @ extract opcode from rINST 2177 SET_VREG(r2, r9) @ vAA<- r2 2178 GOTO_OPCODE(ip) @ jump to next instruction 2179 2180 2181/* ------------------------------ */ 2182 .balign 64 2183.L_OP_APUT: /* 0x4b */ 2184/* File: armv5te/OP_APUT.S */ 2185 /* 2186 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2187 * 2188 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2189 * instructions. We use a pair of FETCH_Bs instead. 2190 * 2191 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2192 */ 2193 /* op vAA, vBB, vCC */ 2194 FETCH_B(r2, 1, 0) @ r2<- BB 2195 mov r9, rINST, lsr #8 @ r9<- AA 2196 FETCH_B(r3, 1, 1) @ r3<- CC 2197 GET_VREG(r0, r2) @ r0<- vBB (array object) 2198 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2199 cmp r0, #0 @ null array object? 
2200 beq common_errNullObject @ yes, bail 2201 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2202 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2203 cmp r1, r3 @ compare unsigned index, length 2204 bcs common_errArrayIndex @ index >= length, bail 2205 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2206 GET_VREG(r2, r9) @ r2<- vAA 2207 GET_INST_OPCODE(ip) @ extract opcode from rINST 2208 str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2209 GOTO_OPCODE(ip) @ jump to next instruction 2210 2211/* ------------------------------ */ 2212 .balign 64 2213.L_OP_APUT_WIDE: /* 0x4c */ 2214/* File: armv5te/OP_APUT_WIDE.S */ 2215 /* 2216 * Array put, 64 bits. vBB[vCC] <- vAA. 2217 * 2218 * Arrays of long/double are 64-bit aligned, so it's okay to use STRD. 2219 */ 2220 /* aput-wide vAA, vBB, vCC */ 2221 FETCH(r0, 1) @ r0<- CCBB 2222 mov r9, rINST, lsr #8 @ r9<- AA 2223 and r2, r0, #255 @ r2<- BB 2224 mov r3, r0, lsr #8 @ r3<- CC 2225 GET_VREG(r0, r2) @ r0<- vBB (array object) 2226 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2227 cmp r0, #0 @ null array object? 2228 beq common_errNullObject @ yes, bail 2229 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2230 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2231 cmp r1, r3 @ compare unsigned index, length 2232 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2233 bcc .LOP_APUT_WIDE_finish @ okay, continue below 2234 b common_errArrayIndex @ index >= length, bail 2235 @ May want to swap the order of these two branches depending on how the 2236 @ branch prediction (if any) handles conditional forward branches vs. 2237 @ unconditional forward branches. 2238 2239/* ------------------------------ */ 2240 .balign 64 2241.L_OP_APUT_OBJECT: /* 0x4d */ 2242/* File: armv5te/OP_APUT_OBJECT.S */ 2243 /* 2244 * Store an object into an array. vBB[vCC] <- vAA. 2245 * 2246 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2247 * instructions. We use a pair of FETCH_Bs instead. 
2248 */ 2249 /* op vAA, vBB, vCC */ 2250 FETCH(r0, 1) @ r0<- CCBB 2251 mov r9, rINST, lsr #8 @ r9<- AA 2252 and r2, r0, #255 @ r2<- BB 2253 mov r3, r0, lsr #8 @ r3<- CC 2254 GET_VREG(r1, r2) @ r1<- vBB (array object) 2255 GET_VREG(r0, r3) @ r0<- vCC (requested index) 2256 cmp r1, #0 @ null array object? 2257 GET_VREG(r9, r9) @ r9<- vAA 2258 beq common_errNullObject @ yes, bail 2259 ldr r3, [r1, #offArrayObject_length] @ r3<- arrayObj->length 2260 add r10, r1, r0, lsl #2 @ r10<- arrayObj + index*width 2261 cmp r0, r3 @ compare unsigned index, length 2262 bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on 2263 b common_errArrayIndex @ index >= length, bail 2264 2265 2266/* ------------------------------ */ 2267 .balign 64 2268.L_OP_APUT_BOOLEAN: /* 0x4e */ 2269/* File: armv5te/OP_APUT_BOOLEAN.S */ 2270/* File: armv5te/OP_APUT.S */ 2271 /* 2272 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2273 * 2274 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2275 * instructions. We use a pair of FETCH_Bs instead. 2276 * 2277 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2278 */ 2279 /* op vAA, vBB, vCC */ 2280 FETCH_B(r2, 1, 0) @ r2<- BB 2281 mov r9, rINST, lsr #8 @ r9<- AA 2282 FETCH_B(r3, 1, 1) @ r3<- CC 2283 GET_VREG(r0, r2) @ r0<- vBB (array object) 2284 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2285 cmp r0, #0 @ null array object? 
2286 beq common_errNullObject @ yes, bail 2287 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2288 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2289 cmp r1, r3 @ compare unsigned index, length 2290 bcs common_errArrayIndex @ index >= length, bail 2291 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2292 GET_VREG(r2, r9) @ r2<- vAA 2293 GET_INST_OPCODE(ip) @ extract opcode from rINST 2294 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2295 GOTO_OPCODE(ip) @ jump to next instruction 2296 2297 2298/* ------------------------------ */ 2299 .balign 64 2300.L_OP_APUT_BYTE: /* 0x4f */ 2301/* File: armv5te/OP_APUT_BYTE.S */ 2302/* File: armv5te/OP_APUT.S */ 2303 /* 2304 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2305 * 2306 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2307 * instructions. We use a pair of FETCH_Bs instead. 2308 * 2309 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2310 */ 2311 /* op vAA, vBB, vCC */ 2312 FETCH_B(r2, 1, 0) @ r2<- BB 2313 mov r9, rINST, lsr #8 @ r9<- AA 2314 FETCH_B(r3, 1, 1) @ r3<- CC 2315 GET_VREG(r0, r2) @ r0<- vBB (array object) 2316 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2317 cmp r0, #0 @ null array object? 2318 beq common_errNullObject @ yes, bail 2319 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2320 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2321 cmp r1, r3 @ compare unsigned index, length 2322 bcs common_errArrayIndex @ index >= length, bail 2323 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2324 GET_VREG(r2, r9) @ r2<- vAA 2325 GET_INST_OPCODE(ip) @ extract opcode from rINST 2326 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2327 GOTO_OPCODE(ip) @ jump to next instruction 2328 2329 2330/* ------------------------------ */ 2331 .balign 64 2332.L_OP_APUT_CHAR: /* 0x50 */ 2333/* File: armv5te/OP_APUT_CHAR.S */ 2334/* File: armv5te/OP_APUT.S */ 2335 /* 2336 * Array put, 32 bits or less. vBB[vCC] <- vAA. 
2337 * 2338 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2339 * instructions. We use a pair of FETCH_Bs instead. 2340 * 2341 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2342 */ 2343 /* op vAA, vBB, vCC */ 2344 FETCH_B(r2, 1, 0) @ r2<- BB 2345 mov r9, rINST, lsr #8 @ r9<- AA 2346 FETCH_B(r3, 1, 1) @ r3<- CC 2347 GET_VREG(r0, r2) @ r0<- vBB (array object) 2348 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2349 cmp r0, #0 @ null array object? 2350 beq common_errNullObject @ yes, bail 2351 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2352 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2353 cmp r1, r3 @ compare unsigned index, length 2354 bcs common_errArrayIndex @ index >= length, bail 2355 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2356 GET_VREG(r2, r9) @ r2<- vAA 2357 GET_INST_OPCODE(ip) @ extract opcode from rINST 2358 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2359 GOTO_OPCODE(ip) @ jump to next instruction 2360 2361 2362/* ------------------------------ */ 2363 .balign 64 2364.L_OP_APUT_SHORT: /* 0x51 */ 2365/* File: armv5te/OP_APUT_SHORT.S */ 2366/* File: armv5te/OP_APUT.S */ 2367 /* 2368 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2369 * 2370 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2371 * instructions. We use a pair of FETCH_Bs instead. 2372 * 2373 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2374 */ 2375 /* op vAA, vBB, vCC */ 2376 FETCH_B(r2, 1, 0) @ r2<- BB 2377 mov r9, rINST, lsr #8 @ r9<- AA 2378 FETCH_B(r3, 1, 1) @ r3<- CC 2379 GET_VREG(r0, r2) @ r0<- vBB (array object) 2380 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2381 cmp r0, #0 @ null array object? 
2382 beq common_errNullObject @ yes, bail 2383 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2384 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2385 cmp r1, r3 @ compare unsigned index, length 2386 bcs common_errArrayIndex @ index >= length, bail 2387 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2388 GET_VREG(r2, r9) @ r2<- vAA 2389 GET_INST_OPCODE(ip) @ extract opcode from rINST 2390 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2391 GOTO_OPCODE(ip) @ jump to next instruction 2392 2393 2394/* ------------------------------ */ 2395 .balign 64 2396.L_OP_IGET: /* 0x52 */ 2397/* File: armv5te/OP_IGET.S */ 2398 /* 2399 * General 32-bit instance field get. 2400 * 2401 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2402 */ 2403 /* op vA, vB, field@CCCC */ 2404 mov r0, rINST, lsr #12 @ r0<- B 2405 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2406 FETCH(r1, 1) @ r1<- field ref CCCC 2407 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2408 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2409 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2410 cmp r0, #0 @ is resolved entry null? 2411 bne .LOP_IGET_finish @ no, already resolved 24128: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2413 EXPORT_PC() @ resolve() could throw 2414 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2415 bl dvmResolveInstField @ r0<- resolved InstField ptr 2416 cmp r0, #0 2417 bne .LOP_IGET_finish 2418 b common_exceptionThrown 2419 2420/* ------------------------------ */ 2421 .balign 64 2422.L_OP_IGET_WIDE: /* 0x53 */ 2423/* File: armv5te/OP_IGET_WIDE.S */ 2424 /* 2425 * Wide 32-bit instance field get. 
2426 */ 2427 /* iget-wide vA, vB, field@CCCC */ 2428 mov r0, rINST, lsr #12 @ r0<- B 2429 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2430 FETCH(r1, 1) @ r1<- field ref CCCC 2431 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2432 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2433 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2434 cmp r0, #0 @ is resolved entry null? 2435 bne .LOP_IGET_WIDE_finish @ no, already resolved 24368: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2437 EXPORT_PC() @ resolve() could throw 2438 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2439 bl dvmResolveInstField @ r0<- resolved InstField ptr 2440 cmp r0, #0 2441 bne .LOP_IGET_WIDE_finish 2442 b common_exceptionThrown 2443 2444/* ------------------------------ */ 2445 .balign 64 2446.L_OP_IGET_OBJECT: /* 0x54 */ 2447/* File: armv5te/OP_IGET_OBJECT.S */ 2448/* File: armv5te/OP_IGET.S */ 2449 /* 2450 * General 32-bit instance field get. 2451 * 2452 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2453 */ 2454 /* op vA, vB, field@CCCC */ 2455 mov r0, rINST, lsr #12 @ r0<- B 2456 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2457 FETCH(r1, 1) @ r1<- field ref CCCC 2458 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2459 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2460 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2461 cmp r0, #0 @ is resolved entry null? 
2462 bne .LOP_IGET_OBJECT_finish @ no, already resolved 24638: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2464 EXPORT_PC() @ resolve() could throw 2465 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2466 bl dvmResolveInstField @ r0<- resolved InstField ptr 2467 cmp r0, #0 2468 bne .LOP_IGET_OBJECT_finish 2469 b common_exceptionThrown 2470 2471 2472/* ------------------------------ */ 2473 .balign 64 2474.L_OP_IGET_BOOLEAN: /* 0x55 */ 2475/* File: armv5te/OP_IGET_BOOLEAN.S */ 2476@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" } 2477/* File: armv5te/OP_IGET.S */ 2478 /* 2479 * General 32-bit instance field get. 2480 * 2481 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2482 */ 2483 /* op vA, vB, field@CCCC */ 2484 mov r0, rINST, lsr #12 @ r0<- B 2485 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2486 FETCH(r1, 1) @ r1<- field ref CCCC 2487 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2488 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2489 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2490 cmp r0, #0 @ is resolved entry null? 2491 bne .LOP_IGET_BOOLEAN_finish @ no, already resolved 24928: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2493 EXPORT_PC() @ resolve() could throw 2494 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2495 bl dvmResolveInstField @ r0<- resolved InstField ptr 2496 cmp r0, #0 2497 bne .LOP_IGET_BOOLEAN_finish 2498 b common_exceptionThrown 2499 2500 2501/* ------------------------------ */ 2502 .balign 64 2503.L_OP_IGET_BYTE: /* 0x56 */ 2504/* File: armv5te/OP_IGET_BYTE.S */ 2505@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" } 2506/* File: armv5te/OP_IGET.S */ 2507 /* 2508 * General 32-bit instance field get. 
2509 * 2510 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2511 */ 2512 /* op vA, vB, field@CCCC */ 2513 mov r0, rINST, lsr #12 @ r0<- B 2514 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2515 FETCH(r1, 1) @ r1<- field ref CCCC 2516 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2517 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2518 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2519 cmp r0, #0 @ is resolved entry null? 2520 bne .LOP_IGET_BYTE_finish @ no, already resolved 25218: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2522 EXPORT_PC() @ resolve() could throw 2523 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2524 bl dvmResolveInstField @ r0<- resolved InstField ptr 2525 cmp r0, #0 2526 bne .LOP_IGET_BYTE_finish 2527 b common_exceptionThrown 2528 2529 2530/* ------------------------------ */ 2531 .balign 64 2532.L_OP_IGET_CHAR: /* 0x57 */ 2533/* File: armv5te/OP_IGET_CHAR.S */ 2534@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" } 2535/* File: armv5te/OP_IGET.S */ 2536 /* 2537 * General 32-bit instance field get. 2538 * 2539 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2540 */ 2541 /* op vA, vB, field@CCCC */ 2542 mov r0, rINST, lsr #12 @ r0<- B 2543 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2544 FETCH(r1, 1) @ r1<- field ref CCCC 2545 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2546 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2547 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2548 cmp r0, #0 @ is resolved entry null? 
2549 bne .LOP_IGET_CHAR_finish @ no, already resolved 25508: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2551 EXPORT_PC() @ resolve() could throw 2552 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2553 bl dvmResolveInstField @ r0<- resolved InstField ptr 2554 cmp r0, #0 2555 bne .LOP_IGET_CHAR_finish 2556 b common_exceptionThrown 2557 2558 2559/* ------------------------------ */ 2560 .balign 64 2561.L_OP_IGET_SHORT: /* 0x58 */ 2562/* File: armv5te/OP_IGET_SHORT.S */ 2563@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" } 2564/* File: armv5te/OP_IGET.S */ 2565 /* 2566 * General 32-bit instance field get. 2567 * 2568 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2569 */ 2570 /* op vA, vB, field@CCCC */ 2571 mov r0, rINST, lsr #12 @ r0<- B 2572 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2573 FETCH(r1, 1) @ r1<- field ref CCCC 2574 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2575 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2576 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2577 cmp r0, #0 @ is resolved entry null? 2578 bne .LOP_IGET_SHORT_finish @ no, already resolved 25798: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2580 EXPORT_PC() @ resolve() could throw 2581 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2582 bl dvmResolveInstField @ r0<- resolved InstField ptr 2583 cmp r0, #0 2584 bne .LOP_IGET_SHORT_finish 2585 b common_exceptionThrown 2586 2587 2588/* ------------------------------ */ 2589 .balign 64 2590.L_OP_IPUT: /* 0x59 */ 2591/* File: armv5te/OP_IPUT.S */ 2592 /* 2593 * General 32-bit instance field put. 
2594 * 2595 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2596 */ 2597 /* op vA, vB, field@CCCC */ 2598 mov r0, rINST, lsr #12 @ r0<- B 2599 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2600 FETCH(r1, 1) @ r1<- field ref CCCC 2601 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2602 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2603 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2604 cmp r0, #0 @ is resolved entry null? 2605 bne .LOP_IPUT_finish @ no, already resolved 26068: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2607 EXPORT_PC() @ resolve() could throw 2608 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2609 bl dvmResolveInstField @ r0<- resolved InstField ptr 2610 cmp r0, #0 @ success? 2611 bne .LOP_IPUT_finish @ yes, finish up 2612 b common_exceptionThrown 2613 2614/* ------------------------------ */ 2615 .balign 64 2616.L_OP_IPUT_WIDE: /* 0x5a */ 2617/* File: armv5te/OP_IPUT_WIDE.S */ 2618 /* iput-wide vA, vB, field@CCCC */ 2619 mov r0, rINST, lsr #12 @ r0<- B 2620 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2621 FETCH(r1, 1) @ r1<- field ref CCCC 2622 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2623 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2624 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2625 cmp r0, #0 @ is resolved entry null? 2626 bne .LOP_IPUT_WIDE_finish @ no, already resolved 26278: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2628 EXPORT_PC() @ resolve() could throw 2629 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2630 bl dvmResolveInstField @ r0<- resolved InstField ptr 2631 cmp r0, #0 @ success? 2632 bne .LOP_IPUT_WIDE_finish @ yes, finish up 2633 b common_exceptionThrown 2634 2635/* ------------------------------ */ 2636 .balign 64 2637.L_OP_IPUT_OBJECT: /* 0x5b */ 2638/* File: armv5te/OP_IPUT_OBJECT.S */ 2639 /* 2640 * 32-bit instance field put. 
2641 * 2642 * for: iput-object, iput-object-volatile 2643 */ 2644 /* op vA, vB, field@CCCC */ 2645 mov r0, rINST, lsr #12 @ r0<- B 2646 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2647 FETCH(r1, 1) @ r1<- field ref CCCC 2648 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2649 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2650 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2651 cmp r0, #0 @ is resolved entry null? 2652 bne .LOP_IPUT_OBJECT_finish @ no, already resolved 26538: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2654 EXPORT_PC() @ resolve() could throw 2655 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2656 bl dvmResolveInstField @ r0<- resolved InstField ptr 2657 cmp r0, #0 @ success? 2658 bne .LOP_IPUT_OBJECT_finish @ yes, finish up 2659 b common_exceptionThrown 2660 2661/* ------------------------------ */ 2662 .balign 64 2663.L_OP_IPUT_BOOLEAN: /* 0x5c */ 2664/* File: armv5te/OP_IPUT_BOOLEAN.S */ 2665@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" } 2666/* File: armv5te/OP_IPUT.S */ 2667 /* 2668 * General 32-bit instance field put. 2669 * 2670 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2671 */ 2672 /* op vA, vB, field@CCCC */ 2673 mov r0, rINST, lsr #12 @ r0<- B 2674 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2675 FETCH(r1, 1) @ r1<- field ref CCCC 2676 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2677 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2678 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2679 cmp r0, #0 @ is resolved entry null? 2680 bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved 26818: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2682 EXPORT_PC() @ resolve() could throw 2683 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2684 bl dvmResolveInstField @ r0<- resolved InstField ptr 2685 cmp r0, #0 @ success? 
2686 bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up 2687 b common_exceptionThrown 2688 2689 2690/* ------------------------------ */ 2691 .balign 64 2692.L_OP_IPUT_BYTE: /* 0x5d */ 2693/* File: armv5te/OP_IPUT_BYTE.S */ 2694@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" } 2695/* File: armv5te/OP_IPUT.S */ 2696 /* 2697 * General 32-bit instance field put. 2698 * 2699 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2700 */ 2701 /* op vA, vB, field@CCCC */ 2702 mov r0, rINST, lsr #12 @ r0<- B 2703 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2704 FETCH(r1, 1) @ r1<- field ref CCCC 2705 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2706 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2707 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2708 cmp r0, #0 @ is resolved entry null? 2709 bne .LOP_IPUT_BYTE_finish @ no, already resolved 27108: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2711 EXPORT_PC() @ resolve() could throw 2712 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2713 bl dvmResolveInstField @ r0<- resolved InstField ptr 2714 cmp r0, #0 @ success? 2715 bne .LOP_IPUT_BYTE_finish @ yes, finish up 2716 b common_exceptionThrown 2717 2718 2719/* ------------------------------ */ 2720 .balign 64 2721.L_OP_IPUT_CHAR: /* 0x5e */ 2722/* File: armv5te/OP_IPUT_CHAR.S */ 2723@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" } 2724/* File: armv5te/OP_IPUT.S */ 2725 /* 2726 * General 32-bit instance field put. 2727 * 2728 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2729 */ 2730 /* op vA, vB, field@CCCC */ 2731 mov r0, rINST, lsr #12 @ r0<- B 2732 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2733 FETCH(r1, 1) @ r1<- field ref CCCC 2734 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2735 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2736 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2737 cmp r0, #0 @ is resolved entry null? 
2738 bne .LOP_IPUT_CHAR_finish @ no, already resolved 27398: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2740 EXPORT_PC() @ resolve() could throw 2741 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2742 bl dvmResolveInstField @ r0<- resolved InstField ptr 2743 cmp r0, #0 @ success? 2744 bne .LOP_IPUT_CHAR_finish @ yes, finish up 2745 b common_exceptionThrown 2746 2747 2748/* ------------------------------ */ 2749 .balign 64 2750.L_OP_IPUT_SHORT: /* 0x5f */ 2751/* File: armv5te/OP_IPUT_SHORT.S */ 2752@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" } 2753/* File: armv5te/OP_IPUT.S */ 2754 /* 2755 * General 32-bit instance field put. 2756 * 2757 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2758 */ 2759 /* op vA, vB, field@CCCC */ 2760 mov r0, rINST, lsr #12 @ r0<- B 2761 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2762 FETCH(r1, 1) @ r1<- field ref CCCC 2763 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2764 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2765 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2766 cmp r0, #0 @ is resolved entry null? 2767 bne .LOP_IPUT_SHORT_finish @ no, already resolved 27688: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2769 EXPORT_PC() @ resolve() could throw 2770 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2771 bl dvmResolveInstField @ r0<- resolved InstField ptr 2772 cmp r0, #0 @ success? 2773 bne .LOP_IPUT_SHORT_finish @ yes, finish up 2774 b common_exceptionThrown 2775 2776 2777/* ------------------------------ */ 2778 .balign 64 2779.L_OP_SGET: /* 0x60 */ 2780/* File: armv5te/OP_SGET.S */ 2781 /* 2782 * General 32-bit SGET handler. 
2783 * 2784 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2785 */ 2786 /* op vAA, field@BBBB */ 2787 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2788 FETCH(r1, 1) @ r1<- field ref BBBB 2789 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2790 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2791 cmp r0, #0 @ is resolved entry null? 2792 beq .LOP_SGET_resolve @ yes, do resolve 2793.LOP_SGET_finish: @ field ptr in r0 2794 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2795 @ no-op @ acquiring load 2796 mov r2, rINST, lsr #8 @ r2<- AA 2797 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2798 SET_VREG(r1, r2) @ fp[AA]<- r1 2799 GET_INST_OPCODE(ip) @ extract opcode from rINST 2800 GOTO_OPCODE(ip) @ jump to next instruction 2801 2802/* ------------------------------ */ 2803 .balign 64 2804.L_OP_SGET_WIDE: /* 0x61 */ 2805/* File: armv5te/OP_SGET_WIDE.S */ 2806 /* 2807 * 64-bit SGET handler. 2808 */ 2809 /* sget-wide vAA, field@BBBB */ 2810 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2811 FETCH(r1, 1) @ r1<- field ref BBBB 2812 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2813 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2814 cmp r0, #0 @ is resolved entry null? 
2815 beq .LOP_SGET_WIDE_resolve @ yes, do resolve 2816.LOP_SGET_WIDE_finish: 2817 mov r9, rINST, lsr #8 @ r9<- AA 2818 .if 0 2819 add r0, r0, #offStaticField_value @ r0<- pointer to data 2820 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 2821 .else 2822 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 2823 .endif 2824 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2825 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2826 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 2827 GET_INST_OPCODE(ip) @ extract opcode from rINST 2828 GOTO_OPCODE(ip) @ jump to next instruction 2829 2830/* ------------------------------ */ 2831 .balign 64 2832.L_OP_SGET_OBJECT: /* 0x62 */ 2833/* File: armv5te/OP_SGET_OBJECT.S */ 2834/* File: armv5te/OP_SGET.S */ 2835 /* 2836 * General 32-bit SGET handler. 2837 * 2838 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2839 */ 2840 /* op vAA, field@BBBB */ 2841 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2842 FETCH(r1, 1) @ r1<- field ref BBBB 2843 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2844 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2845 cmp r0, #0 @ is resolved entry null? 2846 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve 2847.LOP_SGET_OBJECT_finish: @ field ptr in r0 2848 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2849 @ no-op @ acquiring load 2850 mov r2, rINST, lsr #8 @ r2<- AA 2851 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2852 SET_VREG(r1, r2) @ fp[AA]<- r1 2853 GET_INST_OPCODE(ip) @ extract opcode from rINST 2854 GOTO_OPCODE(ip) @ jump to next instruction 2855 2856 2857/* ------------------------------ */ 2858 .balign 64 2859.L_OP_SGET_BOOLEAN: /* 0x63 */ 2860/* File: armv5te/OP_SGET_BOOLEAN.S */ 2861/* File: armv5te/OP_SGET.S */ 2862 /* 2863 * General 32-bit SGET handler. 
2864 * 2865 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2866 */ 2867 /* op vAA, field@BBBB */ 2868 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2869 FETCH(r1, 1) @ r1<- field ref BBBB 2870 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2871 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2872 cmp r0, #0 @ is resolved entry null? 2873 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve 2874.LOP_SGET_BOOLEAN_finish: @ field ptr in r0 2875 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2876 @ no-op @ acquiring load 2877 mov r2, rINST, lsr #8 @ r2<- AA 2878 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2879 SET_VREG(r1, r2) @ fp[AA]<- r1 2880 GET_INST_OPCODE(ip) @ extract opcode from rINST 2881 GOTO_OPCODE(ip) @ jump to next instruction 2882 2883 2884/* ------------------------------ */ 2885 .balign 64 2886.L_OP_SGET_BYTE: /* 0x64 */ 2887/* File: armv5te/OP_SGET_BYTE.S */ 2888/* File: armv5te/OP_SGET.S */ 2889 /* 2890 * General 32-bit SGET handler. 2891 * 2892 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2893 */ 2894 /* op vAA, field@BBBB */ 2895 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2896 FETCH(r1, 1) @ r1<- field ref BBBB 2897 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2898 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2899 cmp r0, #0 @ is resolved entry null? 
2900 beq .LOP_SGET_BYTE_resolve @ yes, do resolve 2901.LOP_SGET_BYTE_finish: @ field ptr in r0 2902 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2903 @ no-op @ acquiring load 2904 mov r2, rINST, lsr #8 @ r2<- AA 2905 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2906 SET_VREG(r1, r2) @ fp[AA]<- r1 2907 GET_INST_OPCODE(ip) @ extract opcode from rINST 2908 GOTO_OPCODE(ip) @ jump to next instruction 2909 2910 2911/* ------------------------------ */ 2912 .balign 64 2913.L_OP_SGET_CHAR: /* 0x65 */ 2914/* File: armv5te/OP_SGET_CHAR.S */ 2915/* File: armv5te/OP_SGET.S */ 2916 /* 2917 * General 32-bit SGET handler. 2918 * 2919 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2920 */ 2921 /* op vAA, field@BBBB */ 2922 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2923 FETCH(r1, 1) @ r1<- field ref BBBB 2924 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2925 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2926 cmp r0, #0 @ is resolved entry null? 2927 beq .LOP_SGET_CHAR_resolve @ yes, do resolve 2928.LOP_SGET_CHAR_finish: @ field ptr in r0 2929 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2930 @ no-op @ acquiring load 2931 mov r2, rINST, lsr #8 @ r2<- AA 2932 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2933 SET_VREG(r1, r2) @ fp[AA]<- r1 2934 GET_INST_OPCODE(ip) @ extract opcode from rINST 2935 GOTO_OPCODE(ip) @ jump to next instruction 2936 2937 2938/* ------------------------------ */ 2939 .balign 64 2940.L_OP_SGET_SHORT: /* 0x66 */ 2941/* File: armv5te/OP_SGET_SHORT.S */ 2942/* File: armv5te/OP_SGET.S */ 2943 /* 2944 * General 32-bit SGET handler. 
2945 * 2946 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2947 */ 2948 /* op vAA, field@BBBB */ 2949 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2950 FETCH(r1, 1) @ r1<- field ref BBBB 2951 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2952 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2953 cmp r0, #0 @ is resolved entry null? 2954 beq .LOP_SGET_SHORT_resolve @ yes, do resolve 2955.LOP_SGET_SHORT_finish: @ field ptr in r0 2956 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2957 @ no-op @ acquiring load 2958 mov r2, rINST, lsr #8 @ r2<- AA 2959 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2960 SET_VREG(r1, r2) @ fp[AA]<- r1 2961 GET_INST_OPCODE(ip) @ extract opcode from rINST 2962 GOTO_OPCODE(ip) @ jump to next instruction 2963 2964 2965/* ------------------------------ */ 2966 .balign 64 2967.L_OP_SPUT: /* 0x67 */ 2968/* File: armv5te/OP_SPUT.S */ 2969 /* 2970 * General 32-bit SPUT handler. 2971 * 2972 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 2973 */ 2974 /* op vAA, field@BBBB */ 2975 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2976 FETCH(r1, 1) @ r1<- field ref BBBB 2977 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2978 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2979 cmp r0, #0 @ is resolved entry null? 2980 beq .LOP_SPUT_resolve @ yes, do resolve 2981.LOP_SPUT_finish: @ field ptr in r0 2982 mov r2, rINST, lsr #8 @ r2<- AA 2983 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2984 GET_VREG(r1, r2) @ r1<- fp[AA] 2985 GET_INST_OPCODE(ip) @ extract opcode from rINST 2986 @ no-op @ releasing store 2987 str r1, [r0, #offStaticField_value] @ field<- vAA 2988 GOTO_OPCODE(ip) @ jump to next instruction 2989 2990/* ------------------------------ */ 2991 .balign 64 2992.L_OP_SPUT_WIDE: /* 0x68 */ 2993/* File: armv5te/OP_SPUT_WIDE.S */ 2994 /* 2995 * 64-bit SPUT handler. 
2996 */ 2997 /* sput-wide vAA, field@BBBB */ 2998 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 2999 FETCH(r1, 1) @ r1<- field ref BBBB 3000 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 3001 mov r9, rINST, lsr #8 @ r9<- AA 3002 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 3003 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 3004 cmp r2, #0 @ is resolved entry null? 3005 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve 3006.LOP_SPUT_WIDE_finish: @ field ptr in r2, AA in r9 3007 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3008 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 3009 GET_INST_OPCODE(r10) @ extract opcode from rINST 3010 .if 0 3011 add r2, r2, #offStaticField_value @ r2<- pointer to data 3012 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 3013 .else 3014 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 3015 .endif 3016 GOTO_OPCODE(r10) @ jump to next instruction 3017 3018/* ------------------------------ */ 3019 .balign 64 3020.L_OP_SPUT_OBJECT: /* 0x69 */ 3021/* File: armv5te/OP_SPUT_OBJECT.S */ 3022 /* 3023 * 32-bit SPUT handler for objects 3024 * 3025 * for: sput-object, sput-object-volatile 3026 */ 3027 /* op vAA, field@BBBB */ 3028 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3029 FETCH(r1, 1) @ r1<- field ref BBBB 3030 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3031 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3032 cmp r0, #0 @ is resolved entry null? 3033 bne .LOP_SPUT_OBJECT_finish @ no, continue 3034 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 3035 EXPORT_PC() @ resolve() could throw, so export now 3036 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 3037 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 3038 cmp r0, #0 @ success? 
3039 bne .LOP_SPUT_OBJECT_finish @ yes, finish 3040 b common_exceptionThrown @ no, handle exception 3041 3042 3043/* ------------------------------ */ 3044 .balign 64 3045.L_OP_SPUT_BOOLEAN: /* 0x6a */ 3046/* File: armv5te/OP_SPUT_BOOLEAN.S */ 3047/* File: armv5te/OP_SPUT.S */ 3048 /* 3049 * General 32-bit SPUT handler. 3050 * 3051 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3052 */ 3053 /* op vAA, field@BBBB */ 3054 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3055 FETCH(r1, 1) @ r1<- field ref BBBB 3056 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3057 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3058 cmp r0, #0 @ is resolved entry null? 3059 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve 3060.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0 3061 mov r2, rINST, lsr #8 @ r2<- AA 3062 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3063 GET_VREG(r1, r2) @ r1<- fp[AA] 3064 GET_INST_OPCODE(ip) @ extract opcode from rINST 3065 @ no-op @ releasing store 3066 str r1, [r0, #offStaticField_value] @ field<- vAA 3067 GOTO_OPCODE(ip) @ jump to next instruction 3068 3069 3070/* ------------------------------ */ 3071 .balign 64 3072.L_OP_SPUT_BYTE: /* 0x6b */ 3073/* File: armv5te/OP_SPUT_BYTE.S */ 3074/* File: armv5te/OP_SPUT.S */ 3075 /* 3076 * General 32-bit SPUT handler. 3077 * 3078 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3079 */ 3080 /* op vAA, field@BBBB */ 3081 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3082 FETCH(r1, 1) @ r1<- field ref BBBB 3083 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3084 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3085 cmp r0, #0 @ is resolved entry null? 
3086 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve 3087.LOP_SPUT_BYTE_finish: @ field ptr in r0 3088 mov r2, rINST, lsr #8 @ r2<- AA 3089 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3090 GET_VREG(r1, r2) @ r1<- fp[AA] 3091 GET_INST_OPCODE(ip) @ extract opcode from rINST 3092 @ no-op @ releasing store 3093 str r1, [r0, #offStaticField_value] @ field<- vAA 3094 GOTO_OPCODE(ip) @ jump to next instruction 3095 3096 3097/* ------------------------------ */ 3098 .balign 64 3099.L_OP_SPUT_CHAR: /* 0x6c */ 3100/* File: armv5te/OP_SPUT_CHAR.S */ 3101/* File: armv5te/OP_SPUT.S */ 3102 /* 3103 * General 32-bit SPUT handler. 3104 * 3105 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3106 */ 3107 /* op vAA, field@BBBB */ 3108 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3109 FETCH(r1, 1) @ r1<- field ref BBBB 3110 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3111 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3112 cmp r0, #0 @ is resolved entry null? 3113 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve 3114.LOP_SPUT_CHAR_finish: @ field ptr in r0 3115 mov r2, rINST, lsr #8 @ r2<- AA 3116 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3117 GET_VREG(r1, r2) @ r1<- fp[AA] 3118 GET_INST_OPCODE(ip) @ extract opcode from rINST 3119 @ no-op @ releasing store 3120 str r1, [r0, #offStaticField_value] @ field<- vAA 3121 GOTO_OPCODE(ip) @ jump to next instruction 3122 3123 3124/* ------------------------------ */ 3125 .balign 64 3126.L_OP_SPUT_SHORT: /* 0x6d */ 3127/* File: armv5te/OP_SPUT_SHORT.S */ 3128/* File: armv5te/OP_SPUT.S */ 3129 /* 3130 * General 32-bit SPUT handler. 
3131 * 3132 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3133 */ 3134 /* op vAA, field@BBBB */ 3135 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3136 FETCH(r1, 1) @ r1<- field ref BBBB 3137 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3138 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3139 cmp r0, #0 @ is resolved entry null? 3140 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve 3141.LOP_SPUT_SHORT_finish: @ field ptr in r0 3142 mov r2, rINST, lsr #8 @ r2<- AA 3143 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3144 GET_VREG(r1, r2) @ r1<- fp[AA] 3145 GET_INST_OPCODE(ip) @ extract opcode from rINST 3146 @ no-op @ releasing store 3147 str r1, [r0, #offStaticField_value] @ field<- vAA 3148 GOTO_OPCODE(ip) @ jump to next instruction 3149 3150 3151/* ------------------------------ */ 3152 .balign 64 3153.L_OP_INVOKE_VIRTUAL: /* 0x6e */ 3154/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3155 /* 3156 * Handle a virtual method call. 3157 * 3158 * for: invoke-virtual, invoke-virtual/range 3159 */ 3160 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3161 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3162 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3163 FETCH(r1, 1) @ r1<- BBBB 3164 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3165 FETCH(r10, 2) @ r10<- GFED or CCCC 3166 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3167 .if (!0) 3168 and r10, r10, #15 @ r10<- D (or stays CCCC) 3169 .endif 3170 cmp r0, #0 @ already resolved? 3171 EXPORT_PC() @ must export for invoke 3172 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on 3173 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3174 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3175 mov r2, #METHOD_VIRTUAL @ resolver method type 3176 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3177 cmp r0, #0 @ got null? 
3178 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue 3179 b common_exceptionThrown @ yes, handle exception 3180 3181/* ------------------------------ */ 3182 .balign 64 3183.L_OP_INVOKE_SUPER: /* 0x6f */ 3184/* File: armv5te/OP_INVOKE_SUPER.S */ 3185 /* 3186 * Handle a "super" method call. 3187 * 3188 * for: invoke-super, invoke-super/range 3189 */ 3190 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3191 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3192 FETCH(r10, 2) @ r10<- GFED or CCCC 3193 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3194 .if (!0) 3195 and r10, r10, #15 @ r10<- D (or stays CCCC) 3196 .endif 3197 FETCH(r1, 1) @ r1<- BBBB 3198 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3199 GET_VREG(r2, r10) @ r2<- "this" ptr 3200 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3201 cmp r2, #0 @ null "this"? 3202 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3203 beq common_errNullObject @ null "this", throw exception 3204 cmp r0, #0 @ already resolved? 3205 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3206 EXPORT_PC() @ must export for invoke 3207 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on 3208 b .LOP_INVOKE_SUPER_resolve @ do resolve now 3209 3210/* ------------------------------ */ 3211 .balign 64 3212.L_OP_INVOKE_DIRECT: /* 0x70 */ 3213/* File: armv5te/OP_INVOKE_DIRECT.S */ 3214 /* 3215 * Handle a direct method call. 3216 * 3217 * (We could defer the "is 'this' pointer null" test to the common 3218 * method invocation code, and use a flag to indicate that static 3219 * calls don't count. If we do this as part of copying the arguments 3220 * out we could avoiding loading the first arg twice.) 
3221 * 3222 * for: invoke-direct, invoke-direct/range 3223 */ 3224 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3225 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3226 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3227 FETCH(r1, 1) @ r1<- BBBB 3228 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3229 FETCH(r10, 2) @ r10<- GFED or CCCC 3230 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3231 .if (!0) 3232 and r10, r10, #15 @ r10<- D (or stays CCCC) 3233 .endif 3234 cmp r0, #0 @ already resolved? 3235 EXPORT_PC() @ must export for invoke 3236 GET_VREG(r2, r10) @ r2<- "this" ptr 3237 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now 3238.LOP_INVOKE_DIRECT_finish: 3239 cmp r2, #0 @ null "this" ref? 3240 bne common_invokeMethodNoRange @ no, continue on 3241 b common_errNullObject @ yes, throw exception 3242 3243/* ------------------------------ */ 3244 .balign 64 3245.L_OP_INVOKE_STATIC: /* 0x71 */ 3246/* File: armv5te/OP_INVOKE_STATIC.S */ 3247 /* 3248 * Handle a static method call. 3249 * 3250 * for: invoke-static, invoke-static/range 3251 */ 3252 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3253 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3254 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3255 FETCH(r1, 1) @ r1<- BBBB 3256 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3257 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3258 cmp r0, #0 @ already resolved? 3259 EXPORT_PC() @ must export for invoke 3260 bne common_invokeMethodNoRange @ yes, continue on 32610: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3262 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3263 mov r2, #METHOD_STATIC @ resolver method type 3264 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3265 cmp r0, #0 @ got null? 
3266 bne common_invokeMethodNoRange @ no, continue 3267 b common_exceptionThrown @ yes, handle exception 3268 3269/* ------------------------------ */ 3270 .balign 64 3271.L_OP_INVOKE_INTERFACE: /* 0x72 */ 3272/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3273 /* 3274 * Handle an interface method call. 3275 * 3276 * for: invoke-interface, invoke-interface/range 3277 */ 3278 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3279 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3280 FETCH(r2, 2) @ r2<- FEDC or CCCC 3281 FETCH(r1, 1) @ r1<- BBBB 3282 .if (!0) 3283 and r2, r2, #15 @ r2<- C (or stays CCCC) 3284 .endif 3285 EXPORT_PC() @ must export for invoke 3286 GET_VREG(r0, r2) @ r0<- first arg ("this") 3287 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3288 cmp r0, #0 @ null obj? 3289 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3290 beq common_errNullObject @ yes, fail 3291 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3292 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3293 cmp r0, #0 @ failed? 3294 beq common_exceptionThrown @ yes, handle exception 3295 b common_invokeMethodNoRange @ jump to common handler 3296 3297/* ------------------------------ */ 3298 .balign 64 3299.L_OP_UNUSED_73: /* 0x73 */ 3300/* File: armv5te/OP_UNUSED_73.S */ 3301/* File: armv5te/unused.S */ 3302 bl common_abort 3303 3304 3305/* ------------------------------ */ 3306 .balign 64 3307.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */ 3308/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */ 3309/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3310 /* 3311 * Handle a virtual method call. 
3312 * 3313 * for: invoke-virtual, invoke-virtual/range 3314 */ 3315 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3316 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3317 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3318 FETCH(r1, 1) @ r1<- BBBB 3319 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3320 FETCH(r10, 2) @ r10<- GFED or CCCC 3321 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3322 .if (!1) 3323 and r10, r10, #15 @ r10<- D (or stays CCCC) 3324 .endif 3325 cmp r0, #0 @ already resolved? 3326 EXPORT_PC() @ must export for invoke 3327 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on 3328 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3329 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3330 mov r2, #METHOD_VIRTUAL @ resolver method type 3331 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3332 cmp r0, #0 @ got null? 3333 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue 3334 b common_exceptionThrown @ yes, handle exception 3335 3336 3337/* ------------------------------ */ 3338 .balign 64 3339.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */ 3340/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */ 3341/* File: armv5te/OP_INVOKE_SUPER.S */ 3342 /* 3343 * Handle a "super" method call. 3344 * 3345 * for: invoke-super, invoke-super/range 3346 */ 3347 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3348 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3349 FETCH(r10, 2) @ r10<- GFED or CCCC 3350 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3351 .if (!1) 3352 and r10, r10, #15 @ r10<- D (or stays CCCC) 3353 .endif 3354 FETCH(r1, 1) @ r1<- BBBB 3355 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3356 GET_VREG(r2, r10) @ r2<- "this" ptr 3357 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3358 cmp r2, #0 @ null "this"? 3359 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3360 beq common_errNullObject @ null "this", throw exception 3361 cmp r0, #0 @ already resolved? 
3362 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3363 EXPORT_PC() @ must export for invoke 3364 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on 3365 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now 3366 3367 3368/* ------------------------------ */ 3369 .balign 64 3370.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */ 3371/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */ 3372/* File: armv5te/OP_INVOKE_DIRECT.S */ 3373 /* 3374 * Handle a direct method call. 3375 * 3376 * (We could defer the "is 'this' pointer null" test to the common 3377 * method invocation code, and use a flag to indicate that static 3378 * calls don't count. If we do this as part of copying the arguments 3379 * out we could avoiding loading the first arg twice.) 3380 * 3381 * for: invoke-direct, invoke-direct/range 3382 */ 3383 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3384 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3385 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3386 FETCH(r1, 1) @ r1<- BBBB 3387 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3388 FETCH(r10, 2) @ r10<- GFED or CCCC 3389 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3390 .if (!1) 3391 and r10, r10, #15 @ r10<- D (or stays CCCC) 3392 .endif 3393 cmp r0, #0 @ already resolved? 3394 EXPORT_PC() @ must export for invoke 3395 GET_VREG(r2, r10) @ r2<- "this" ptr 3396 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now 3397.LOP_INVOKE_DIRECT_RANGE_finish: 3398 cmp r2, #0 @ null "this" ref? 3399 bne common_invokeMethodRange @ no, continue on 3400 b common_errNullObject @ yes, throw exception 3401 3402 3403/* ------------------------------ */ 3404 .balign 64 3405.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */ 3406/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */ 3407/* File: armv5te/OP_INVOKE_STATIC.S */ 3408 /* 3409 * Handle a static method call. 
3410 * 3411 * for: invoke-static, invoke-static/range 3412 */ 3413 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3414 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3415 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3416 FETCH(r1, 1) @ r1<- BBBB 3417 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3418 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3419 cmp r0, #0 @ already resolved? 3420 EXPORT_PC() @ must export for invoke 3421 bne common_invokeMethodRange @ yes, continue on 34220: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3423 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3424 mov r2, #METHOD_STATIC @ resolver method type 3425 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3426 cmp r0, #0 @ got null? 3427 bne common_invokeMethodRange @ no, continue 3428 b common_exceptionThrown @ yes, handle exception 3429 3430 3431/* ------------------------------ */ 3432 .balign 64 3433.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */ 3434/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */ 3435/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3436 /* 3437 * Handle an interface method call. 3438 * 3439 * for: invoke-interface, invoke-interface/range 3440 */ 3441 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3442 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3443 FETCH(r2, 2) @ r2<- FEDC or CCCC 3444 FETCH(r1, 1) @ r1<- BBBB 3445 .if (!1) 3446 and r2, r2, #15 @ r2<- C (or stays CCCC) 3447 .endif 3448 EXPORT_PC() @ must export for invoke 3449 GET_VREG(r0, r2) @ r0<- first arg ("this") 3450 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3451 cmp r0, #0 @ null obj? 3452 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3453 beq common_errNullObject @ yes, fail 3454 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3455 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3456 cmp r0, #0 @ failed? 
3457 beq common_exceptionThrown @ yes, handle exception 3458 b common_invokeMethodRange @ jump to common handler 3459 3460 3461/* ------------------------------ */ 3462 .balign 64 3463.L_OP_UNUSED_79: /* 0x79 */ 3464/* File: armv5te/OP_UNUSED_79.S */ 3465/* File: armv5te/unused.S */ 3466 bl common_abort 3467 3468 3469/* ------------------------------ */ 3470 .balign 64 3471.L_OP_UNUSED_7A: /* 0x7a */ 3472/* File: armv5te/OP_UNUSED_7A.S */ 3473/* File: armv5te/unused.S */ 3474 bl common_abort 3475 3476 3477/* ------------------------------ */ 3478 .balign 64 3479.L_OP_NEG_INT: /* 0x7b */ 3480/* File: armv5te/OP_NEG_INT.S */ 3481/* File: armv5te/unop.S */ 3482 /* 3483 * Generic 32-bit unary operation. Provide an "instr" line that 3484 * specifies an instruction that performs "result = op r0". 3485 * This could be an ARM instruction or a function call. 3486 * 3487 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3488 * int-to-byte, int-to-char, int-to-short 3489 */ 3490 /* unop vA, vB */ 3491 mov r3, rINST, lsr #12 @ r3<- B 3492 mov r9, rINST, lsr #8 @ r9<- A+ 3493 GET_VREG(r0, r3) @ r0<- vB 3494 and r9, r9, #15 3495 @ optional op; may set condition codes 3496 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3497 rsb r0, r0, #0 @ r0<- op, r0-r3 changed 3498 GET_INST_OPCODE(ip) @ extract opcode from rINST 3499 SET_VREG(r0, r9) @ vAA<- r0 3500 GOTO_OPCODE(ip) @ jump to next instruction 3501 /* 9-10 instructions */ 3502 3503 3504/* ------------------------------ */ 3505 .balign 64 3506.L_OP_NOT_INT: /* 0x7c */ 3507/* File: armv5te/OP_NOT_INT.S */ 3508/* File: armv5te/unop.S */ 3509 /* 3510 * Generic 32-bit unary operation. Provide an "instr" line that 3511 * specifies an instruction that performs "result = op r0". 3512 * This could be an ARM instruction or a function call. 
3513 * 3514 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3515 * int-to-byte, int-to-char, int-to-short 3516 */ 3517 /* unop vA, vB */ 3518 mov r3, rINST, lsr #12 @ r3<- B 3519 mov r9, rINST, lsr #8 @ r9<- A+ 3520 GET_VREG(r0, r3) @ r0<- vB 3521 and r9, r9, #15 3522 @ optional op; may set condition codes 3523 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3524 mvn r0, r0 @ r0<- op, r0-r3 changed 3525 GET_INST_OPCODE(ip) @ extract opcode from rINST 3526 SET_VREG(r0, r9) @ vAA<- r0 3527 GOTO_OPCODE(ip) @ jump to next instruction 3528 /* 9-10 instructions */ 3529 3530 3531/* ------------------------------ */ 3532 .balign 64 3533.L_OP_NEG_LONG: /* 0x7d */ 3534/* File: armv5te/OP_NEG_LONG.S */ 3535/* File: armv5te/unopWide.S */ 3536 /* 3537 * Generic 64-bit unary operation. Provide an "instr" line that 3538 * specifies an instruction that performs "result = op r0/r1". 3539 * This could be an ARM instruction or a function call. 3540 * 3541 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3542 */ 3543 /* unop vA, vB */ 3544 mov r9, rINST, lsr #8 @ r9<- A+ 3545 mov r3, rINST, lsr #12 @ r3<- B 3546 and r9, r9, #15 3547 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3548 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3549 ldmia r3, {r0-r1} @ r0/r1<- vAA 3550 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3551 rsbs r0, r0, #0 @ optional op; may set condition codes 3552 rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed 3553 GET_INST_OPCODE(ip) @ extract opcode from rINST 3554 stmia r9, {r0-r1} @ vAA<- r0/r1 3555 GOTO_OPCODE(ip) @ jump to next instruction 3556 /* 12-13 instructions */ 3557 3558 3559/* ------------------------------ */ 3560 .balign 64 3561.L_OP_NOT_LONG: /* 0x7e */ 3562/* File: armv5te/OP_NOT_LONG.S */ 3563/* File: armv5te/unopWide.S */ 3564 /* 3565 * Generic 64-bit unary operation. Provide an "instr" line that 3566 * specifies an instruction that performs "result = op r0/r1". 3567 * This could be an ARM instruction or a function call. 
3568 * 3569 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3570 */ 3571 /* unop vA, vB */ 3572 mov r9, rINST, lsr #8 @ r9<- A+ 3573 mov r3, rINST, lsr #12 @ r3<- B 3574 and r9, r9, #15 3575 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3576 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3577 ldmia r3, {r0-r1} @ r0/r1<- vAA 3578 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3579 mvn r0, r0 @ optional op; may set condition codes 3580 mvn r1, r1 @ r0/r1<- op, r2-r3 changed 3581 GET_INST_OPCODE(ip) @ extract opcode from rINST 3582 stmia r9, {r0-r1} @ vAA<- r0/r1 3583 GOTO_OPCODE(ip) @ jump to next instruction 3584 /* 12-13 instructions */ 3585 3586 3587/* ------------------------------ */ 3588 .balign 64 3589.L_OP_NEG_FLOAT: /* 0x7f */ 3590/* File: armv5te/OP_NEG_FLOAT.S */ 3591/* File: armv5te/unop.S */ 3592 /* 3593 * Generic 32-bit unary operation. Provide an "instr" line that 3594 * specifies an instruction that performs "result = op r0". 3595 * This could be an ARM instruction or a function call. 3596 * 3597 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3598 * int-to-byte, int-to-char, int-to-short 3599 */ 3600 /* unop vA, vB */ 3601 mov r3, rINST, lsr #12 @ r3<- B 3602 mov r9, rINST, lsr #8 @ r9<- A+ 3603 GET_VREG(r0, r3) @ r0<- vB 3604 and r9, r9, #15 3605 @ optional op; may set condition codes 3606 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3607 add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed 3608 GET_INST_OPCODE(ip) @ extract opcode from rINST 3609 SET_VREG(r0, r9) @ vAA<- r0 3610 GOTO_OPCODE(ip) @ jump to next instruction 3611 /* 9-10 instructions */ 3612 3613 3614/* ------------------------------ */ 3615 .balign 64 3616.L_OP_NEG_DOUBLE: /* 0x80 */ 3617/* File: armv5te/OP_NEG_DOUBLE.S */ 3618/* File: armv5te/unopWide.S */ 3619 /* 3620 * Generic 64-bit unary operation. Provide an "instr" line that 3621 * specifies an instruction that performs "result = op r0/r1". 3622 * This could be an ARM instruction or a function call. 
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    add     r1, r1, #0x80000000         @ r1<- op: toggle sign bit (bit 31 of high word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_LONG: /* 0x81 */
/* File: armv5te/OP_INT_TO_LONG.S */
/* File: armv5te/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation. Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r0, asr #31             @ r1<- sign-extension of r0 (result in r0/r1)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_FLOAT: /* 0x82 */
/* File: arm-vfp/OP_INT_TO_FLOAT.S */
/* File: arm-vfp/funop.S */
    /*
     * Generic 32-bit unary floating-point operation. Provide an "instr"
     * line that specifies an instruction that performs "s1 = op s0".
3677 * 3678 * for: int-to-float, float-to-int 3679 */ 3680 /* unop vA, vB */ 3681 mov r3, rINST, lsr #12 @ r3<- B 3682 mov r9, rINST, lsr #8 @ r9<- A+ 3683 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3684 flds s0, [r3] @ s0<- vB 3685 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3686 and r9, r9, #15 @ r9<- A 3687 fsitos s1, s0 @ s1<- op 3688 GET_INST_OPCODE(ip) @ extract opcode from rINST 3689 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3690 fsts s1, [r9] @ vA<- s1 3691 GOTO_OPCODE(ip) @ jump to next instruction 3692 3693 3694/* ------------------------------ */ 3695 .balign 64 3696.L_OP_INT_TO_DOUBLE: /* 0x83 */ 3697/* File: arm-vfp/OP_INT_TO_DOUBLE.S */ 3698/* File: arm-vfp/funopWider.S */ 3699 /* 3700 * Generic 32bit-to-64bit floating point unary operation. Provide an 3701 * "instr" line that specifies an instruction that performs "d0 = op s0". 3702 * 3703 * For: int-to-double, float-to-double 3704 */ 3705 /* unop vA, vB */ 3706 mov r3, rINST, lsr #12 @ r3<- B 3707 mov r9, rINST, lsr #8 @ r9<- A+ 3708 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3709 flds s0, [r3] @ s0<- vB 3710 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3711 and r9, r9, #15 @ r9<- A 3712 fsitod d0, s0 @ d0<- op 3713 GET_INST_OPCODE(ip) @ extract opcode from rINST 3714 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3715 fstd d0, [r9] @ vA<- d0 3716 GOTO_OPCODE(ip) @ jump to next instruction 3717 3718 3719/* ------------------------------ */ 3720 .balign 64 3721.L_OP_LONG_TO_INT: /* 0x84 */ 3722/* File: armv5te/OP_LONG_TO_INT.S */ 3723/* we ignore the high word, making this equivalent to a 32-bit reg move */ 3724/* File: armv5te/OP_MOVE.S */ 3725 /* for move, move-object, long-to-int */ 3726 /* op vA, vB */ 3727 mov r1, rINST, lsr #12 @ r1<- B from 15:12 3728 mov r0, rINST, lsr #8 @ r0<- A from 11:8 3729 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3730 GET_VREG(r2, r1) @ r2<- fp[B] 3731 and r0, r0, #15 3732 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 3733 SET_VREG(r2, r0) @ fp[A]<- r2 3734 GOTO_OPCODE(ip) @ 
execute next instruction 3735 3736 3737/* ------------------------------ */ 3738 .balign 64 3739.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3740/* File: armv5te/OP_LONG_TO_FLOAT.S */ 3741/* File: armv5te/unopNarrower.S */ 3742 /* 3743 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3744 * that specifies an instruction that performs "result = op r0/r1", where 3745 * "result" is a 32-bit quantity in r0. 3746 * 3747 * For: long-to-float, double-to-int, double-to-float 3748 * 3749 * (This would work for long-to-int, but that instruction is actually 3750 * an exact match for OP_MOVE.) 3751 */ 3752 /* unop vA, vB */ 3753 mov r3, rINST, lsr #12 @ r3<- B 3754 mov r9, rINST, lsr #8 @ r9<- A+ 3755 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3756 and r9, r9, #15 3757 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3758 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3759 @ optional op; may set condition codes 3760 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3761 GET_INST_OPCODE(ip) @ extract opcode from rINST 3762 SET_VREG(r0, r9) @ vA<- r0 3763 GOTO_OPCODE(ip) @ jump to next instruction 3764 /* 10-11 instructions */ 3765 3766 3767/* ------------------------------ */ 3768 .balign 64 3769.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3770/* File: armv5te/OP_LONG_TO_DOUBLE.S */ 3771/* File: armv5te/unopWide.S */ 3772 /* 3773 * Generic 64-bit unary operation. Provide an "instr" line that 3774 * specifies an instruction that performs "result = op r0/r1". 3775 * This could be an ARM instruction or a function call. 
3776 * 3777 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3778 */ 3779 /* unop vA, vB */ 3780 mov r9, rINST, lsr #8 @ r9<- A+ 3781 mov r3, rINST, lsr #12 @ r3<- B 3782 and r9, r9, #15 3783 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3784 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3785 ldmia r3, {r0-r1} @ r0/r1<- vAA 3786 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3787 @ optional op; may set condition codes 3788 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3789 GET_INST_OPCODE(ip) @ extract opcode from rINST 3790 stmia r9, {r0-r1} @ vAA<- r0/r1 3791 GOTO_OPCODE(ip) @ jump to next instruction 3792 /* 12-13 instructions */ 3793 3794 3795/* ------------------------------ */ 3796 .balign 64 3797.L_OP_FLOAT_TO_INT: /* 0x87 */ 3798/* File: arm-vfp/OP_FLOAT_TO_INT.S */ 3799/* File: arm-vfp/funop.S */ 3800 /* 3801 * Generic 32-bit unary floating-point operation. Provide an "instr" 3802 * line that specifies an instruction that performs "s1 = op s0". 3803 * 3804 * for: int-to-float, float-to-int 3805 */ 3806 /* unop vA, vB */ 3807 mov r3, rINST, lsr #12 @ r3<- B 3808 mov r9, rINST, lsr #8 @ r9<- A+ 3809 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3810 flds s0, [r3] @ s0<- vB 3811 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3812 and r9, r9, #15 @ r9<- A 3813 ftosizs s1, s0 @ s1<- op 3814 GET_INST_OPCODE(ip) @ extract opcode from rINST 3815 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3816 fsts s1, [r9] @ vA<- s1 3817 GOTO_OPCODE(ip) @ jump to next instruction 3818 3819 3820/* ------------------------------ */ 3821 .balign 64 3822.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3823/* File: armv5te/OP_FLOAT_TO_LONG.S */ 3824@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3825/* File: armv5te/unopWider.S */ 3826 /* 3827 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3828 * that specifies an instruction that performs "result = op r0", where 3829 * "result" is a 64-bit quantity in r0/r1. 
3830 * 3831 * For: int-to-long, int-to-double, float-to-long, float-to-double 3832 */ 3833 /* unop vA, vB */ 3834 mov r9, rINST, lsr #8 @ r9<- A+ 3835 mov r3, rINST, lsr #12 @ r3<- B 3836 and r9, r9, #15 3837 GET_VREG(r0, r3) @ r0<- vB 3838 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3839 @ optional op; may set condition codes 3840 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3841 bl f2l_doconv @ r0<- op, r0-r3 changed 3842 GET_INST_OPCODE(ip) @ extract opcode from rINST 3843 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3844 GOTO_OPCODE(ip) @ jump to next instruction 3845 /* 10-11 instructions */ 3846 3847 3848 3849/* ------------------------------ */ 3850 .balign 64 3851.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3852/* File: arm-vfp/OP_FLOAT_TO_DOUBLE.S */ 3853/* File: arm-vfp/funopWider.S */ 3854 /* 3855 * Generic 32bit-to-64bit floating point unary operation. Provide an 3856 * "instr" line that specifies an instruction that performs "d0 = op s0". 3857 * 3858 * For: int-to-double, float-to-double 3859 */ 3860 /* unop vA, vB */ 3861 mov r3, rINST, lsr #12 @ r3<- B 3862 mov r9, rINST, lsr #8 @ r9<- A+ 3863 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3864 flds s0, [r3] @ s0<- vB 3865 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3866 and r9, r9, #15 @ r9<- A 3867 fcvtds d0, s0 @ d0<- op 3868 GET_INST_OPCODE(ip) @ extract opcode from rINST 3869 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3870 fstd d0, [r9] @ vA<- d0 3871 GOTO_OPCODE(ip) @ jump to next instruction 3872 3873 3874/* ------------------------------ */ 3875 .balign 64 3876.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3877/* File: arm-vfp/OP_DOUBLE_TO_INT.S */ 3878/* File: arm-vfp/funopNarrower.S */ 3879 /* 3880 * Generic 64bit-to-32bit unary floating point operation. Provide an 3881 * "instr" line that specifies an instruction that performs "s0 = op d0". 
     *
     * For: double-to-int, double-to-float
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    fldd    d0, [r3]                    @ d0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    ftosizd s0, d0                      @ s0<- op (convert, round toward zero)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s0, [r9]                    @ vA<- s0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_LONG: /* 0x8b */
/* File: armv5te/OP_DOUBLE_TO_LONG.S */
@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"}
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation. Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    bl      d2l_doconv                  @ r0/r1<- op, r2-r3 changed
                                        @ NOTE(review): calls d2l_doconv, not the
                                        @ __aeabi_d2lz named in the @include above;
                                        @ presumably a Java-semantics (NaN/range)
                                        @ wrapper defined elsewhere -- confirm
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
/* File: arm-vfp/OP_DOUBLE_TO_FLOAT.S */
/* File: arm-vfp/funopNarrower.S */
    /*
     * Generic 64bit-to-32bit unary floating point operation. Provide an
     * "instr" line that specifies an instruction that performs "s0 = op d0".
3937 * 3938 * For: double-to-int, double-to-float 3939 */ 3940 /* unop vA, vB */ 3941 mov r3, rINST, lsr #12 @ r3<- B 3942 mov r9, rINST, lsr #8 @ r9<- A+ 3943 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3944 fldd d0, [r3] @ d0<- vB 3945 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3946 and r9, r9, #15 @ r9<- A 3947 fcvtsd s0, d0 @ s0<- op 3948 GET_INST_OPCODE(ip) @ extract opcode from rINST 3949 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3950 fsts s0, [r9] @ vA<- s0 3951 GOTO_OPCODE(ip) @ jump to next instruction 3952 3953 3954/* ------------------------------ */ 3955 .balign 64 3956.L_OP_INT_TO_BYTE: /* 0x8d */ 3957/* File: armv5te/OP_INT_TO_BYTE.S */ 3958/* File: armv5te/unop.S */ 3959 /* 3960 * Generic 32-bit unary operation. Provide an "instr" line that 3961 * specifies an instruction that performs "result = op r0". 3962 * This could be an ARM instruction or a function call. 3963 * 3964 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3965 * int-to-byte, int-to-char, int-to-short 3966 */ 3967 /* unop vA, vB */ 3968 mov r3, rINST, lsr #12 @ r3<- B 3969 mov r9, rINST, lsr #8 @ r9<- A+ 3970 GET_VREG(r0, r3) @ r0<- vB 3971 and r9, r9, #15 3972 mov r0, r0, asl #24 @ optional op; may set condition codes 3973 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3974 mov r0, r0, asr #24 @ r0<- op, r0-r3 changed 3975 GET_INST_OPCODE(ip) @ extract opcode from rINST 3976 SET_VREG(r0, r9) @ vAA<- r0 3977 GOTO_OPCODE(ip) @ jump to next instruction 3978 /* 9-10 instructions */ 3979 3980 3981/* ------------------------------ */ 3982 .balign 64 3983.L_OP_INT_TO_CHAR: /* 0x8e */ 3984/* File: armv5te/OP_INT_TO_CHAR.S */ 3985/* File: armv5te/unop.S */ 3986 /* 3987 * Generic 32-bit unary operation. Provide an "instr" line that 3988 * specifies an instruction that performs "result = op r0". 3989 * This could be an ARM instruction or a function call. 
3990 * 3991 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3992 * int-to-byte, int-to-char, int-to-short 3993 */ 3994 /* unop vA, vB */ 3995 mov r3, rINST, lsr #12 @ r3<- B 3996 mov r9, rINST, lsr #8 @ r9<- A+ 3997 GET_VREG(r0, r3) @ r0<- vB 3998 and r9, r9, #15 3999 mov r0, r0, asl #16 @ optional op; may set condition codes 4000 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4001 mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed 4002 GET_INST_OPCODE(ip) @ extract opcode from rINST 4003 SET_VREG(r0, r9) @ vAA<- r0 4004 GOTO_OPCODE(ip) @ jump to next instruction 4005 /* 9-10 instructions */ 4006 4007 4008/* ------------------------------ */ 4009 .balign 64 4010.L_OP_INT_TO_SHORT: /* 0x8f */ 4011/* File: armv5te/OP_INT_TO_SHORT.S */ 4012/* File: armv5te/unop.S */ 4013 /* 4014 * Generic 32-bit unary operation. Provide an "instr" line that 4015 * specifies an instruction that performs "result = op r0". 4016 * This could be an ARM instruction or a function call. 4017 * 4018 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4019 * int-to-byte, int-to-char, int-to-short 4020 */ 4021 /* unop vA, vB */ 4022 mov r3, rINST, lsr #12 @ r3<- B 4023 mov r9, rINST, lsr #8 @ r9<- A+ 4024 GET_VREG(r0, r3) @ r0<- vB 4025 and r9, r9, #15 4026 mov r0, r0, asl #16 @ optional op; may set condition codes 4027 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4028 mov r0, r0, asr #16 @ r0<- op, r0-r3 changed 4029 GET_INST_OPCODE(ip) @ extract opcode from rINST 4030 SET_VREG(r0, r9) @ vAA<- r0 4031 GOTO_OPCODE(ip) @ jump to next instruction 4032 /* 9-10 instructions */ 4033 4034 4035/* ------------------------------ */ 4036 .balign 64 4037.L_OP_ADD_INT: /* 0x90 */ 4038/* File: armv5te/OP_ADD_INT.S */ 4039/* File: armv5te/binop.S */ 4040 /* 4041 * Generic 32-bit binary operation. Provide an "instr" line that 4042 * specifies an instruction that performs "result = r0 op r1". 4043 * This could be an ARM instruction or a function call. 
(If the result 4044 * comes back in a register other than r0, you can override "result".) 4045 * 4046 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4047 * vCC (r1). Useful for integer division and modulus. Note that we 4048 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4049 * handles it correctly. 4050 * 4051 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4052 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4053 * mul-float, div-float, rem-float 4054 */ 4055 /* binop vAA, vBB, vCC */ 4056 FETCH(r0, 1) @ r0<- CCBB 4057 mov r9, rINST, lsr #8 @ r9<- AA 4058 mov r3, r0, lsr #8 @ r3<- CC 4059 and r2, r0, #255 @ r2<- BB 4060 GET_VREG(r1, r3) @ r1<- vCC 4061 GET_VREG(r0, r2) @ r0<- vBB 4062 .if 0 4063 cmp r1, #0 @ is second operand zero? 4064 beq common_errDivideByZero 4065 .endif 4066 4067 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4068 @ optional op; may set condition codes 4069 add r0, r0, r1 @ r0<- op, r0-r3 changed 4070 GET_INST_OPCODE(ip) @ extract opcode from rINST 4071 SET_VREG(r0, r9) @ vAA<- r0 4072 GOTO_OPCODE(ip) @ jump to next instruction 4073 /* 11-14 instructions */ 4074 4075 4076/* ------------------------------ */ 4077 .balign 64 4078.L_OP_SUB_INT: /* 0x91 */ 4079/* File: armv5te/OP_SUB_INT.S */ 4080/* File: armv5te/binop.S */ 4081 /* 4082 * Generic 32-bit binary operation. Provide an "instr" line that 4083 * specifies an instruction that performs "result = r0 op r1". 4084 * This could be an ARM instruction or a function call. (If the result 4085 * comes back in a register other than r0, you can override "result".) 4086 * 4087 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4088 * vCC (r1). Useful for integer division and modulus. Note that we 4089 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4090 * handles it correctly. 
4091 * 4092 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4093 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4094 * mul-float, div-float, rem-float 4095 */ 4096 /* binop vAA, vBB, vCC */ 4097 FETCH(r0, 1) @ r0<- CCBB 4098 mov r9, rINST, lsr #8 @ r9<- AA 4099 mov r3, r0, lsr #8 @ r3<- CC 4100 and r2, r0, #255 @ r2<- BB 4101 GET_VREG(r1, r3) @ r1<- vCC 4102 GET_VREG(r0, r2) @ r0<- vBB 4103 .if 0 4104 cmp r1, #0 @ is second operand zero? 4105 beq common_errDivideByZero 4106 .endif 4107 4108 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4109 @ optional op; may set condition codes 4110 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4111 GET_INST_OPCODE(ip) @ extract opcode from rINST 4112 SET_VREG(r0, r9) @ vAA<- r0 4113 GOTO_OPCODE(ip) @ jump to next instruction 4114 /* 11-14 instructions */ 4115 4116 4117/* ------------------------------ */ 4118 .balign 64 4119.L_OP_MUL_INT: /* 0x92 */ 4120/* File: armv5te/OP_MUL_INT.S */ 4121/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4122/* File: armv5te/binop.S */ 4123 /* 4124 * Generic 32-bit binary operation. Provide an "instr" line that 4125 * specifies an instruction that performs "result = r0 op r1". 4126 * This could be an ARM instruction or a function call. (If the result 4127 * comes back in a register other than r0, you can override "result".) 4128 * 4129 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4130 * vCC (r1). Useful for integer division and modulus. Note that we 4131 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4132 * handles it correctly. 
4133 * 4134 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4135 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4136 * mul-float, div-float, rem-float 4137 */ 4138 /* binop vAA, vBB, vCC */ 4139 FETCH(r0, 1) @ r0<- CCBB 4140 mov r9, rINST, lsr #8 @ r9<- AA 4141 mov r3, r0, lsr #8 @ r3<- CC 4142 and r2, r0, #255 @ r2<- BB 4143 GET_VREG(r1, r3) @ r1<- vCC 4144 GET_VREG(r0, r2) @ r0<- vBB 4145 .if 0 4146 cmp r1, #0 @ is second operand zero? 4147 beq common_errDivideByZero 4148 .endif 4149 4150 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4151 @ optional op; may set condition codes 4152 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4153 GET_INST_OPCODE(ip) @ extract opcode from rINST 4154 SET_VREG(r0, r9) @ vAA<- r0 4155 GOTO_OPCODE(ip) @ jump to next instruction 4156 /* 11-14 instructions */ 4157 4158 4159/* ------------------------------ */ 4160 .balign 64 4161.L_OP_DIV_INT: /* 0x93 */ 4162/* File: armv5te/OP_DIV_INT.S */ 4163/* File: armv5te/binop.S */ 4164 /* 4165 * Generic 32-bit binary operation. Provide an "instr" line that 4166 * specifies an instruction that performs "result = r0 op r1". 4167 * This could be an ARM instruction or a function call. (If the result 4168 * comes back in a register other than r0, you can override "result".) 4169 * 4170 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4171 * vCC (r1). Useful for integer division and modulus. Note that we 4172 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4173 * handles it correctly. 
4174 * 4175 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4176 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4177 * mul-float, div-float, rem-float 4178 */ 4179 /* binop vAA, vBB, vCC */ 4180 FETCH(r0, 1) @ r0<- CCBB 4181 mov r9, rINST, lsr #8 @ r9<- AA 4182 mov r3, r0, lsr #8 @ r3<- CC 4183 and r2, r0, #255 @ r2<- BB 4184 GET_VREG(r1, r3) @ r1<- vCC 4185 GET_VREG(r0, r2) @ r0<- vBB 4186 .if 1 4187 cmp r1, #0 @ is second operand zero? 4188 beq common_errDivideByZero 4189 .endif 4190 4191 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4192 @ optional op; may set condition codes 4193 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4194 GET_INST_OPCODE(ip) @ extract opcode from rINST 4195 SET_VREG(r0, r9) @ vAA<- r0 4196 GOTO_OPCODE(ip) @ jump to next instruction 4197 /* 11-14 instructions */ 4198 4199 4200/* ------------------------------ */ 4201 .balign 64 4202.L_OP_REM_INT: /* 0x94 */ 4203/* File: armv5te/OP_REM_INT.S */ 4204/* idivmod returns quotient in r0 and remainder in r1 */ 4205/* File: armv5te/binop.S */ 4206 /* 4207 * Generic 32-bit binary operation. Provide an "instr" line that 4208 * specifies an instruction that performs "result = r0 op r1". 4209 * This could be an ARM instruction or a function call. (If the result 4210 * comes back in a register other than r0, you can override "result".) 4211 * 4212 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4213 * vCC (r1). Useful for integer division and modulus. Note that we 4214 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4215 * handles it correctly. 
4216 * 4217 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4218 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4219 * mul-float, div-float, rem-float 4220 */ 4221 /* binop vAA, vBB, vCC */ 4222 FETCH(r0, 1) @ r0<- CCBB 4223 mov r9, rINST, lsr #8 @ r9<- AA 4224 mov r3, r0, lsr #8 @ r3<- CC 4225 and r2, r0, #255 @ r2<- BB 4226 GET_VREG(r1, r3) @ r1<- vCC 4227 GET_VREG(r0, r2) @ r0<- vBB 4228 .if 1 4229 cmp r1, #0 @ is second operand zero? 4230 beq common_errDivideByZero 4231 .endif 4232 4233 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4234 @ optional op; may set condition codes 4235 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4236 GET_INST_OPCODE(ip) @ extract opcode from rINST 4237 SET_VREG(r1, r9) @ vAA<- r1 4238 GOTO_OPCODE(ip) @ jump to next instruction 4239 /* 11-14 instructions */ 4240 4241 4242/* ------------------------------ */ 4243 .balign 64 4244.L_OP_AND_INT: /* 0x95 */ 4245/* File: armv5te/OP_AND_INT.S */ 4246/* File: armv5te/binop.S */ 4247 /* 4248 * Generic 32-bit binary operation. Provide an "instr" line that 4249 * specifies an instruction that performs "result = r0 op r1". 4250 * This could be an ARM instruction or a function call. (If the result 4251 * comes back in a register other than r0, you can override "result".) 4252 * 4253 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4254 * vCC (r1). Useful for integer division and modulus. Note that we 4255 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4256 * handles it correctly. 
4257 * 4258 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4259 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4260 * mul-float, div-float, rem-float 4261 */ 4262 /* binop vAA, vBB, vCC */ 4263 FETCH(r0, 1) @ r0<- CCBB 4264 mov r9, rINST, lsr #8 @ r9<- AA 4265 mov r3, r0, lsr #8 @ r3<- CC 4266 and r2, r0, #255 @ r2<- BB 4267 GET_VREG(r1, r3) @ r1<- vCC 4268 GET_VREG(r0, r2) @ r0<- vBB 4269 .if 0 4270 cmp r1, #0 @ is second operand zero? 4271 beq common_errDivideByZero 4272 .endif 4273 4274 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4275 @ optional op; may set condition codes 4276 and r0, r0, r1 @ r0<- op, r0-r3 changed 4277 GET_INST_OPCODE(ip) @ extract opcode from rINST 4278 SET_VREG(r0, r9) @ vAA<- r0 4279 GOTO_OPCODE(ip) @ jump to next instruction 4280 /* 11-14 instructions */ 4281 4282 4283/* ------------------------------ */ 4284 .balign 64 4285.L_OP_OR_INT: /* 0x96 */ 4286/* File: armv5te/OP_OR_INT.S */ 4287/* File: armv5te/binop.S */ 4288 /* 4289 * Generic 32-bit binary operation. Provide an "instr" line that 4290 * specifies an instruction that performs "result = r0 op r1". 4291 * This could be an ARM instruction or a function call. (If the result 4292 * comes back in a register other than r0, you can override "result".) 4293 * 4294 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4295 * vCC (r1). Useful for integer division and modulus. Note that we 4296 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4297 * handles it correctly. 
4298 * 4299 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4300 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4301 * mul-float, div-float, rem-float 4302 */ 4303 /* binop vAA, vBB, vCC */ 4304 FETCH(r0, 1) @ r0<- CCBB 4305 mov r9, rINST, lsr #8 @ r9<- AA 4306 mov r3, r0, lsr #8 @ r3<- CC 4307 and r2, r0, #255 @ r2<- BB 4308 GET_VREG(r1, r3) @ r1<- vCC 4309 GET_VREG(r0, r2) @ r0<- vBB 4310 .if 0 4311 cmp r1, #0 @ is second operand zero? 4312 beq common_errDivideByZero 4313 .endif 4314 4315 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4316 @ optional op; may set condition codes 4317 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4318 GET_INST_OPCODE(ip) @ extract opcode from rINST 4319 SET_VREG(r0, r9) @ vAA<- r0 4320 GOTO_OPCODE(ip) @ jump to next instruction 4321 /* 11-14 instructions */ 4322 4323 4324/* ------------------------------ */ 4325 .balign 64 4326.L_OP_XOR_INT: /* 0x97 */ 4327/* File: armv5te/OP_XOR_INT.S */ 4328/* File: armv5te/binop.S */ 4329 /* 4330 * Generic 32-bit binary operation. Provide an "instr" line that 4331 * specifies an instruction that performs "result = r0 op r1". 4332 * This could be an ARM instruction or a function call. (If the result 4333 * comes back in a register other than r0, you can override "result".) 4334 * 4335 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4336 * vCC (r1). Useful for integer division and modulus. Note that we 4337 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4338 * handles it correctly. 
4339 * 4340 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4341 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4342 * mul-float, div-float, rem-float 4343 */ 4344 /* binop vAA, vBB, vCC */ 4345 FETCH(r0, 1) @ r0<- CCBB 4346 mov r9, rINST, lsr #8 @ r9<- AA 4347 mov r3, r0, lsr #8 @ r3<- CC 4348 and r2, r0, #255 @ r2<- BB 4349 GET_VREG(r1, r3) @ r1<- vCC 4350 GET_VREG(r0, r2) @ r0<- vBB 4351 .if 0 4352 cmp r1, #0 @ is second operand zero? 4353 beq common_errDivideByZero 4354 .endif 4355 4356 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4357 @ optional op; may set condition codes 4358 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4359 GET_INST_OPCODE(ip) @ extract opcode from rINST 4360 SET_VREG(r0, r9) @ vAA<- r0 4361 GOTO_OPCODE(ip) @ jump to next instruction 4362 /* 11-14 instructions */ 4363 4364 4365/* ------------------------------ */ 4366 .balign 64 4367.L_OP_SHL_INT: /* 0x98 */ 4368/* File: armv5te/OP_SHL_INT.S */ 4369/* File: armv5te/binop.S */ 4370 /* 4371 * Generic 32-bit binary operation. Provide an "instr" line that 4372 * specifies an instruction that performs "result = r0 op r1". 4373 * This could be an ARM instruction or a function call. (If the result 4374 * comes back in a register other than r0, you can override "result".) 4375 * 4376 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4377 * vCC (r1). Useful for integer division and modulus. Note that we 4378 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4379 * handles it correctly. 
4380 * 4381 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4382 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4383 * mul-float, div-float, rem-float 4384 */ 4385 /* binop vAA, vBB, vCC */ 4386 FETCH(r0, 1) @ r0<- CCBB 4387 mov r9, rINST, lsr #8 @ r9<- AA 4388 mov r3, r0, lsr #8 @ r3<- CC 4389 and r2, r0, #255 @ r2<- BB 4390 GET_VREG(r1, r3) @ r1<- vCC 4391 GET_VREG(r0, r2) @ r0<- vBB 4392 .if 0 4393 cmp r1, #0 @ is second operand zero? 4394 beq common_errDivideByZero 4395 .endif 4396 4397 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4398 and r1, r1, #31 @ optional op; may set condition codes 4399 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 4400 GET_INST_OPCODE(ip) @ extract opcode from rINST 4401 SET_VREG(r0, r9) @ vAA<- r0 4402 GOTO_OPCODE(ip) @ jump to next instruction 4403 /* 11-14 instructions */ 4404 4405 4406/* ------------------------------ */ 4407 .balign 64 4408.L_OP_SHR_INT: /* 0x99 */ 4409/* File: armv5te/OP_SHR_INT.S */ 4410/* File: armv5te/binop.S */ 4411 /* 4412 * Generic 32-bit binary operation. Provide an "instr" line that 4413 * specifies an instruction that performs "result = r0 op r1". 4414 * This could be an ARM instruction or a function call. (If the result 4415 * comes back in a register other than r0, you can override "result".) 4416 * 4417 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4418 * vCC (r1). Useful for integer division and modulus. Note that we 4419 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4420 * handles it correctly. 
4421 * 4422 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4423 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4424 * mul-float, div-float, rem-float 4425 */ 4426 /* binop vAA, vBB, vCC */ 4427 FETCH(r0, 1) @ r0<- CCBB 4428 mov r9, rINST, lsr #8 @ r9<- AA 4429 mov r3, r0, lsr #8 @ r3<- CC 4430 and r2, r0, #255 @ r2<- BB 4431 GET_VREG(r1, r3) @ r1<- vCC 4432 GET_VREG(r0, r2) @ r0<- vBB 4433 .if 0 4434 cmp r1, #0 @ is second operand zero? 4435 beq common_errDivideByZero 4436 .endif 4437 4438 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4439 and r1, r1, #31 @ optional op; may set condition codes 4440 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 4441 GET_INST_OPCODE(ip) @ extract opcode from rINST 4442 SET_VREG(r0, r9) @ vAA<- r0 4443 GOTO_OPCODE(ip) @ jump to next instruction 4444 /* 11-14 instructions */ 4445 4446 4447/* ------------------------------ */ 4448 .balign 64 4449.L_OP_USHR_INT: /* 0x9a */ 4450/* File: armv5te/OP_USHR_INT.S */ 4451/* File: armv5te/binop.S */ 4452 /* 4453 * Generic 32-bit binary operation. Provide an "instr" line that 4454 * specifies an instruction that performs "result = r0 op r1". 4455 * This could be an ARM instruction or a function call. (If the result 4456 * comes back in a register other than r0, you can override "result".) 4457 * 4458 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4459 * vCC (r1). Useful for integer division and modulus. Note that we 4460 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4461 * handles it correctly. 
4462 * 4463 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4464 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4465 * mul-float, div-float, rem-float 4466 */ 4467 /* binop vAA, vBB, vCC */ 4468 FETCH(r0, 1) @ r0<- CCBB 4469 mov r9, rINST, lsr #8 @ r9<- AA 4470 mov r3, r0, lsr #8 @ r3<- CC 4471 and r2, r0, #255 @ r2<- BB 4472 GET_VREG(r1, r3) @ r1<- vCC 4473 GET_VREG(r0, r2) @ r0<- vBB 4474 .if 0 4475 cmp r1, #0 @ is second operand zero? 4476 beq common_errDivideByZero 4477 .endif 4478 4479 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4480 and r1, r1, #31 @ optional op; may set condition codes 4481 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 4482 GET_INST_OPCODE(ip) @ extract opcode from rINST 4483 SET_VREG(r0, r9) @ vAA<- r0 4484 GOTO_OPCODE(ip) @ jump to next instruction 4485 /* 11-14 instructions */ 4486 4487 4488/* ------------------------------ */ 4489 .balign 64 4490.L_OP_ADD_LONG: /* 0x9b */ 4491/* File: armv5te/OP_ADD_LONG.S */ 4492/* File: armv5te/binopWide.S */ 4493 /* 4494 * Generic 64-bit binary operation. Provide an "instr" line that 4495 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4496 * This could be an ARM instruction or a function call. (If the result 4497 * comes back in a register other than r0, you can override "result".) 4498 * 4499 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4500 * vCC (r1). Useful for integer division and modulus. 4501 * 4502 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4503 * xor-long, add-double, sub-double, mul-double, div-double, 4504 * rem-double 4505 * 4506 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    adds    r0, r0, r2                  @ optional op; may set condition codes
    adc     r1, r1, r3                  @ result<- op, r0-r3 changed (carry from low word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG: /* 0x9c */
/* File: armv5te/OP_SUB_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ optional op; may set condition codes
    sbc     r1, r1, r3                  @ result<- op, r0-r3 changed (borrow from low word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG: /* 0x9d */
/* File: armv5te/OP_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
     */
    /* mul-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX (64-bit low product)
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST, lsr #8           @ r0<- AA
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    add     r0, rFP, r0, lsl #2         @ r0<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_MUL_LONG_finish        @ store {r9,r10} to vAA (out-of-line tail)

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG: /* 0x9e */
/* File: armv5te/OP_DIV_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed (quotient in r0/r1)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG: /* 0x9f */
/* File: armv5te/OP_REM_LONG.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vAA/vAA+1<- r2/r3 (remainder, per ldivmod)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG: /* 0xa0 */
/* File: armv5te/OP_AND_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ optional op; may set condition codes
    and     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG: /* 0xa1 */
/* File: armv5te/OP_OR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ optional op; may set condition codes
    orr     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG: /* 0xa2 */
/* File: armv5te/OP_XOR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG: /* 0xa3 */
/* File: armv5te/OP_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish        @ low word + store handled out of line

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG: /* 0xa4 */
/* File: armv5te/OP_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHR_LONG_finish        @ high word + store handled out of line

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG: /* 0xa5 */
/* File: armv5te/OP_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* ushr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_USHR_LONG_finish       @ high word + store handled out of line

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT: /* 0xa6 */
/* File: arm-vfp/OP_ADD_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fadds   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT: /* 0xa7 */
/* File: arm-vfp/OP_SUB_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.
Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT: /* 0xa8 */
/* File: arm-vfp/OP_MUL_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuls   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT: /* 0xa9 */
/* File: arm-vfp/OP_DIV_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT: /* 0xaa */
/* File: armv5te/OP_REM_FLOAT.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.
Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed (libm float remainder)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE: /* 0xab */
/* File: arm-vfp/OP_ADD_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    faddd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE: /* 0xac */
/* File: arm-vfp/OP_SUB_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE: /* 0xad */
/* File: arm-vfp/OP_MUL_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuld   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE: /* 0xae */
/* File: arm-vfp/OP_DIV_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE: /* 0xaf */
/* File: armv5te/OP_REM_DOUBLE.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed (libm double remainder)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_2ADDR: /* 0xb0 */
/* File: armv5te/OP_ADD_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip the "+" bits)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT_2ADDR: /* 0xb1 */
/* File: armv5te/OP_SUB_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip the "+" bits)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_2ADDR: /* 0xb2 */
/* File: armv5te/OP_MUL_INT_2ADDR.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip the "+" bits)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed (Rd != Rm on ARMv5)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_2ADDR: /* 0xb3 */
/* File: armv5te/OP_DIV_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip the "+" bits)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_2ADDR: /* 0xb4 */
/* File: armv5te/OP_REM_INT_2ADDR.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation: vA<- vA op vB.
     * chkzero=1 here, so vB (r1) is tested for zero first.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ remainder in r1, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vA<- r1 (the remainder)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_2ADDR: /* 0xb5 */
/* File: armv5te/OP_AND_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation: vA<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_2ADDR: /* 0xb6 */
/* File: armv5te/OP_OR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation: vA<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_2ADDR: /* 0xb7 */
/* File: armv5te/OP_XOR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation: vA<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT_2ADDR: /* 0xb8 */
/* File: armv5te/OP_SHL_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation: vA<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op: mask shift count to 0-31
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_2ADDR: /* 0xb9 */
/* File: armv5te/OP_SHR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation: vA<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op: mask shift count to 0-31
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_2ADDR: /* 0xba */
/* File: armv5te/OP_USHR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation: vA<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op: mask shift count to 0-31
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_LONG_2ADDR: /* 0xbb */
/* File: armv5te/OP_ADD_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation: vA/vA+1<- vA op vB, with
     * the "instr" lines computing "result = r0-r1 op r2-r3".  chkzero
     * (the .if constant below) enables a divide-by-zero test on vB
     * (r2-r3); here it is 0, so the test is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    adds    r0, r0, r2                  @ optional op; may set condition codes
    adc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG_2ADDR: /* 0xbc */
/* File: armv5te/OP_SUB_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation: vA/vA+1<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ optional op; may set condition codes
    sbc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG_2ADDR: /* 0xbd */
/* File: armv5te/OP_MUL_LONG_2ADDR.S */
    /*
     * Signed 64-bit integer multiply, "/2addr" version.
     *
     * See OP_MUL_LONG for an explanation.
     *
     * We get a little tight on registers, so to avoid looking up &fp[A]
     * again we stuff it into rINST.
     */
    /* mul-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     rINST, rFP, r9, lsl #2      @ rINST<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   rINST, {r0-r1}              @ r0/r1<- vA/vA+1
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST                   @ r0<- &fp[A] (free up rINST)
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r0, {r9-r10}                @ vA/vA+1<- r9/r10
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG_2ADDR: /* 0xbe */
/* File: armv5te/OP_DIV_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation: vA/vA+1<- vA op vB, with
     * the "instr" line computing "result = r0-r1 op r2-r3".  (If the
     * result comes back in registers other than r0-r1, the template's
     * "result" is overridden.)
     * chkzero=1 here, so vB (r2-r3) is tested for zero before the
     * long division.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ quotient in r0/r1, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1 (quotient)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG_2ADDR: /* 0xbf */
/* File: armv5te/OP_REM_LONG_2ADDR.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation: vA/vA+1<- vA op vB.
     * chkzero=1 here, so vB (r2-r3) is tested for zero first.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ remainder in r2/r3, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vA/vA+1<- r2/r3 (remainder)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG_2ADDR: /* 0xc0 */
/* File: armv5te/OP_AND_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation: vA/vA+1<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ optional op; may set condition codes
    and     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG_2ADDR: /* 0xc1 */
/* File: armv5te/OP_OR_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation: vA/vA+1<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ optional op; may set condition codes
    orr     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
/* File: armv5te/OP_XOR_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation: vA/vA+1<- vA op vB.
     * chkzero=0, so the zero test below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
/* File: armv5te/OP_SHL_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is the 64-bit value/result,
     * vB is the 32-bit shift distance (only the low 6 bits are used).
     * The handler overflows its 64-byte slot; the tail is at
     * .LOP_SHL_LONG_2ADDR_finish.
     */
    /* shl-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    b       .LOP_SHL_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
/* File: armv5te/OP_SHR_LONG_2ADDR.S */
    /*
     * Long integer shift (arithmetic), 2addr version.  vA is the 64-bit
     * value/result, vB is the 32-bit shift distance (low 6 bits used).
     * Tail is at .LOP_SHR_LONG_2ADDR_finish.
     */
    /* shr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    mov     r1, r1, asr r2              @ r1<- r1 >> r2
    b       .LOP_SHR_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
/* File: armv5te/OP_USHR_LONG_2ADDR.S */
    /*
     * Long integer shift (logical), 2addr version.  vA is the 64-bit
     * value/result, vB is the 32-bit shift distance (low 6 bits used).
     * Tail is at .LOP_USHR_LONG_2ADDR_finish.
     */
    /* ushr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
    b       .LOP_USHR_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
/* File: arm-vfp/OP_ADD_FLOAT_2ADDR.S */
/* File: arm-vfp/fbinop2addr.S */
    /*
     * Generic 32-bit floating point "/2addr" binary operation on VFP:
     * vA<- vA op vB, computed as "s2 = s0 op s1".
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fadds   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
/* File: arm-vfp/OP_SUB_FLOAT_2ADDR.S */
/* File: arm-vfp/fbinop2addr.S */
    /*
     * Generic 32-bit floating point "/2addr" binary operation on VFP:
     * vA<- vA op vB, computed as "s2 = s0 op s1".
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fsubs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
/* File: arm-vfp/OP_MUL_FLOAT_2ADDR.S */
/* File: arm-vfp/fbinop2addr.S */
    /*
     * Generic 32-bit floating point "/2addr" binary operation on VFP:
     * vA<- vA op vB, computed as "s2 = s0 op s1".
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fmuls   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
/* File: arm-vfp/OP_DIV_FLOAT_2ADDR.S */
/* File: arm-vfp/fbinop2addr.S */
    /*
     * Generic 32-bit floating point "/2addr" binary operation on VFP:
     * vA<- vA op vB, computed as "s2 = s0 op s1".
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fdivs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT_2ADDR: /* 0xca */
/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation: vA<- vA op vB, done in
     * core registers (the op is a call to libm's fmodf below).
     * chkzero=0, so the zero test is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
/* File: arm-vfp/OP_ADD_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation on VFP:
     * vA/vA+1<- vA op vB, computed as "d2 = d0 op d1".
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    faddd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vA/vA+1<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
/* File: arm-vfp/OP_SUB_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation on VFP:
     * vA/vA+1<- vA op vB, computed as "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fsubd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
/* File: arm-vfp/OP_MUL_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fmuld   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
/* File: arm-vfp/OP_DIV_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fdivd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ template "chkzero" flag: disabled here
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT16: /* 0xd0 */
/* File: armv5te/OP_ADD_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0                               @ template "chkzero" flag: disabled here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT: /* 0xd1 */
/* File: armv5te/OP_RSUB_INT.S */
/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0                               @ template "chkzero" flag: disabled here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    rsb     r0, r0, r1                  @ r0<- op (r1 - r0), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT16: /* 0xd2 */
/* File: armv5te/OP_MUL_INT_LIT16.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0                               @ template "chkzero" flag: disabled here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT16: /* 0xd3 */
/* File: armv5te/OP_DIV_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.
     * Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 1                               @ template "chkzero" flag: enabled for div
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT16: /* 0xd4 */
/* File: armv5te/OP_REM_INT_LIT16.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 1                               @ template "chkzero" flag: enabled for rem
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idivmod             @ r1<- op (remainder), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT16: /* 0xd5 */
/* File: armv5te/OP_AND_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0                               @ template "chkzero" flag: disabled here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT16: /* 0xd6 */
/* File: armv5te/OP_OR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0                               @ template "chkzero" flag: disabled here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT16: /* 0xd7 */
/* File: armv5te/OP_XOR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits masked off below)
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0                               @ template "chkzero" flag: disabled here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT8: /* 0xd8 */
/* File: armv5te/OP_ADD_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ template "chkzero" flag: disabled here
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT_LIT8: /* 0xd9 */
/* File: armv5te/OP_RSUB_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ template "chkzero" flag: disabled here
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    rsb     r0, r0, r1                  @ r0<- op (r1 - r0), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT8: /* 0xda */
/* File: armv5te/OP_MUL_INT_LIT8.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ template "chkzero" flag: disabled here
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT8: /* 0xdb */
/* File: armv5te/OP_DIV_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 1                               @ template "chkzero" flag: enabled for div
    @ cmp elided: the "movs" above already set Z from the CC literal
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT8: /* 0xdc */
/* File: armv5te/OP_REM_INT_LIT8.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 1                               @ template "chkzero" flag: enabled for rem
    @ cmp elided: the "movs" above already set Z from the CC literal
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op (remainder), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT8: /* 0xdd */
/* File: armv5te/OP_AND_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ template "chkzero" flag: disabled here
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT8: /* 0xde */
/* File: armv5te/OP_OR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ template "chkzero" flag: disabled here
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT8: /* 0xdf */
/* File: armv5te/OP_XOR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ template "chkzero" flag: disabled here
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT_LIT8: /* 0xe0 */
/* File: armv5te/OP_SHL_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ template "chkzero" flag: disabled here
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_LIT8: /* 0xe1 */
/* File: armv5te/OP_SHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ template "chkzero" flag: disabled here
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_LIT8: /* 0xe2 */
/* File: armv5te/OP_USHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ template "chkzero" flag: disabled here
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_IGET_VOLATILE: /* 0xe3 */
/* File: armv5te/OP_IGET_VOLATILE.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_VOLATILE_finish   @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_VOLATILE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_VOLATILE: /* 0xe4 */
/* File: armv5te/OP_IPUT_VOLATILE.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_VOLATILE_finish   @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_VOLATILE_finish   @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET_VOLATILE: /* 0xe5 */
/* File: armv5te/OP_SGET_VOLATILE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_VOLATILE_resolve  @ yes, do resolve
.LOP_SGET_VOLATILE_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    SMP_DMB                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_VOLATILE: /* 0xe6 */
/* File: armv5te/OP_SPUT_VOLATILE.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_VOLATILE_resolve  @ yes, do resolve
.LOP_SPUT_VOLATILE_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
/* File: armv5te/OP_IGET_OBJECT_VOLATILE.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
7215 * 7216 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 7217 */ 7218 /* op vA, vB, field@CCCC */ 7219 mov r0, rINST, lsr #12 @ r0<- B 7220 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7221 FETCH(r1, 1) @ r1<- field ref CCCC 7222 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 7223 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7224 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7225 cmp r0, #0 @ is resolved entry null? 7226 bne .LOP_IGET_OBJECT_VOLATILE_finish @ no, already resolved 72278: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7228 EXPORT_PC() @ resolve() could throw 7229 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7230 bl dvmResolveInstField @ r0<- resolved InstField ptr 7231 cmp r0, #0 7232 bne .LOP_IGET_OBJECT_VOLATILE_finish 7233 b common_exceptionThrown 7234 7235 7236/* ------------------------------ */ 7237 .balign 64 7238.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */ 7239/* File: armv5te/OP_IGET_WIDE_VOLATILE.S */ 7240/* File: armv5te/OP_IGET_WIDE.S */ 7241 /* 7242 * Wide 32-bit instance field get. 7243 */ 7244 /* iget-wide vA, vB, field@CCCC */ 7245 mov r0, rINST, lsr #12 @ r0<- B 7246 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7247 FETCH(r1, 1) @ r1<- field ref CCCC 7248 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 7249 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7250 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7251 cmp r0, #0 @ is resolved entry null? 
7252 bne .LOP_IGET_WIDE_VOLATILE_finish @ no, already resolved 72538: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7254 EXPORT_PC() @ resolve() could throw 7255 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7256 bl dvmResolveInstField @ r0<- resolved InstField ptr 7257 cmp r0, #0 7258 bne .LOP_IGET_WIDE_VOLATILE_finish 7259 b common_exceptionThrown 7260 7261 7262/* ------------------------------ */ 7263 .balign 64 7264.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */ 7265/* File: armv5te/OP_IPUT_WIDE_VOLATILE.S */ 7266/* File: armv5te/OP_IPUT_WIDE.S */ 7267 /* iput-wide vA, vB, field@CCCC */ 7268 mov r0, rINST, lsr #12 @ r0<- B 7269 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7270 FETCH(r1, 1) @ r1<- field ref CCCC 7271 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 7272 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7273 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7274 cmp r0, #0 @ is resolved entry null? 7275 bne .LOP_IPUT_WIDE_VOLATILE_finish @ no, already resolved 72768: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7277 EXPORT_PC() @ resolve() could throw 7278 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7279 bl dvmResolveInstField @ r0<- resolved InstField ptr 7280 cmp r0, #0 @ success? 7281 bne .LOP_IPUT_WIDE_VOLATILE_finish @ yes, finish up 7282 b common_exceptionThrown 7283 7284 7285/* ------------------------------ */ 7286 .balign 64 7287.L_OP_SGET_WIDE_VOLATILE: /* 0xea */ 7288/* File: armv5te/OP_SGET_WIDE_VOLATILE.S */ 7289/* File: armv5te/OP_SGET_WIDE.S */ 7290 /* 7291 * 64-bit SGET handler. 7292 */ 7293 /* sget-wide vAA, field@BBBB */ 7294 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7295 FETCH(r1, 1) @ r1<- field ref BBBB 7296 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7297 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7298 cmp r0, #0 @ is resolved entry null? 
7299 beq .LOP_SGET_WIDE_VOLATILE_resolve @ yes, do resolve 7300.LOP_SGET_WIDE_VOLATILE_finish: 7301 mov r9, rINST, lsr #8 @ r9<- AA 7302 .if 1 7303 add r0, r0, #offStaticField_value @ r0<- pointer to data 7304 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 7305 .else 7306 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 7307 .endif 7308 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 7309 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7310 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 7311 GET_INST_OPCODE(ip) @ extract opcode from rINST 7312 GOTO_OPCODE(ip) @ jump to next instruction 7313 7314 7315/* ------------------------------ */ 7316 .balign 64 7317.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */ 7318/* File: armv5te/OP_SPUT_WIDE_VOLATILE.S */ 7319/* File: armv5te/OP_SPUT_WIDE.S */ 7320 /* 7321 * 64-bit SPUT handler. 7322 */ 7323 /* sput-wide vAA, field@BBBB */ 7324 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 7325 FETCH(r1, 1) @ r1<- field ref BBBB 7326 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 7327 mov r9, rINST, lsr #8 @ r9<- AA 7328 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 7329 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 7330 cmp r2, #0 @ is resolved entry null? 
7331 beq .LOP_SPUT_WIDE_VOLATILE_resolve @ yes, do resolve 7332.LOP_SPUT_WIDE_VOLATILE_finish: @ field ptr in r2, AA in r9 7333 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7334 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 7335 GET_INST_OPCODE(r10) @ extract opcode from rINST 7336 .if 1 7337 add r2, r2, #offStaticField_value @ r2<- pointer to data 7338 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 7339 .else 7340 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 7341 .endif 7342 GOTO_OPCODE(r10) @ jump to next instruction 7343 7344 7345/* ------------------------------ */ 7346 .balign 64 7347.L_OP_BREAKPOINT: /* 0xec */ 7348/* File: armv5te/OP_BREAKPOINT.S */ 7349/* File: armv5te/unused.S */ 7350 bl common_abort 7351 7352 7353/* ------------------------------ */ 7354 .balign 64 7355.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */ 7356/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */ 7357 /* 7358 * Handle a throw-verification-error instruction. This throws an 7359 * exception for an error discovered during verification. The 7360 * exception is indicated by AA, with some detail provided by BBBB. 7361 */ 7362 /* op AA, ref@BBBB */ 7363 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7364 FETCH(r2, 1) @ r2<- BBBB 7365 EXPORT_PC() @ export the PC 7366 mov r1, rINST, lsr #8 @ r1<- AA 7367 bl dvmThrowVerificationError @ always throws 7368 b common_exceptionThrown @ handle exception 7369 7370/* ------------------------------ */ 7371 .balign 64 7372.L_OP_EXECUTE_INLINE: /* 0xee */ 7373/* File: armv5te/OP_EXECUTE_INLINE.S */ 7374 /* 7375 * Execute a "native inline" instruction. 7376 * 7377 * We need to call an InlineOp4Func: 7378 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7379 * 7380 * The first four args are in r0-r3, pointer to return value storage 7381 * is on the stack. The function's return value is a flag that tells 7382 * us if an exception was thrown. 
7383 */ 7384 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */ 7385 FETCH(r10, 1) @ r10<- BBBB 7386 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7387 EXPORT_PC() @ can throw 7388 sub sp, sp, #8 @ make room for arg, +64 bit align 7389 mov r0, rINST, lsr #12 @ r0<- B 7390 str r1, [sp] @ push &glue->retval 7391 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after 7392 add sp, sp, #8 @ pop stack 7393 cmp r0, #0 @ test boolean result of inline 7394 beq common_exceptionThrown @ returned false, handle exception 7395 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7396 GET_INST_OPCODE(ip) @ extract opcode from rINST 7397 GOTO_OPCODE(ip) @ jump to next instruction 7398 7399/* ------------------------------ */ 7400 .balign 64 7401.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */ 7402/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */ 7403 /* 7404 * Execute a "native inline" instruction, using "/range" semantics. 7405 * Same idea as execute-inline, but we get the args differently. 7406 * 7407 * We need to call an InlineOp4Func: 7408 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7409 * 7410 * The first four args are in r0-r3, pointer to return value storage 7411 * is on the stack. The function's return value is a flag that tells 7412 * us if an exception was thrown. 
7413 */ 7414 /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */ 7415 FETCH(r10, 1) @ r10<- BBBB 7416 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7417 EXPORT_PC() @ can throw 7418 sub sp, sp, #8 @ make room for arg, +64 bit align 7419 mov r0, rINST, lsr #8 @ r0<- AA 7420 str r1, [sp] @ push &glue->retval 7421 bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after 7422 add sp, sp, #8 @ pop stack 7423 cmp r0, #0 @ test boolean result of inline 7424 beq common_exceptionThrown @ returned false, handle exception 7425 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7426 GET_INST_OPCODE(ip) @ extract opcode from rINST 7427 GOTO_OPCODE(ip) @ jump to next instruction 7428 7429/* ------------------------------ */ 7430 .balign 64 7431.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7432/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7433 /* 7434 * invoke-direct-empty is a no-op in a "standard" interpreter. 7435 */ 7436 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7437 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7438 GOTO_OPCODE(ip) @ execute it 7439 7440/* ------------------------------ */ 7441 .balign 64 7442.L_OP_UNUSED_F1: /* 0xf1 */ 7443/* File: armv5te/OP_UNUSED_F1.S */ 7444/* File: armv5te/unused.S */ 7445 bl common_abort 7446 7447 7448/* ------------------------------ */ 7449 .balign 64 7450.L_OP_IGET_QUICK: /* 0xf2 */ 7451/* File: armv5te/OP_IGET_QUICK.S */ 7452 /* For: iget-quick, iget-object-quick */ 7453 /* op vA, vB, offset@CCCC */ 7454 mov r2, rINST, lsr #12 @ r2<- B 7455 GET_VREG(r3, r2) @ r3<- object we're operating on 7456 FETCH(r1, 1) @ r1<- field byte offset 7457 cmp r3, #0 @ check object for null 7458 mov r2, rINST, lsr #8 @ r2<- A(+) 7459 beq common_errNullObject @ object was null 7460 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7461 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7462 and r2, r2, #15 7463 GET_INST_OPCODE(ip) @ extract opcode from rINST 7464 SET_VREG(r0, r2) @ fp[A]<- r0 7465 GOTO_OPCODE(ip) @ 
jump to next instruction 7466 7467/* ------------------------------ */ 7468 .balign 64 7469.L_OP_IGET_WIDE_QUICK: /* 0xf3 */ 7470/* File: armv5te/OP_IGET_WIDE_QUICK.S */ 7471 /* iget-wide-quick vA, vB, offset@CCCC */ 7472 mov r2, rINST, lsr #12 @ r2<- B 7473 GET_VREG(r3, r2) @ r3<- object we're operating on 7474 FETCH(ip, 1) @ ip<- field byte offset 7475 cmp r3, #0 @ check object for null 7476 mov r2, rINST, lsr #8 @ r2<- A(+) 7477 beq common_errNullObject @ object was null 7478 ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned) 7479 and r2, r2, #15 7480 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7481 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 7482 GET_INST_OPCODE(ip) @ extract opcode from rINST 7483 stmia r3, {r0-r1} @ fp[A]<- r0/r1 7484 GOTO_OPCODE(ip) @ jump to next instruction 7485 7486/* ------------------------------ */ 7487 .balign 64 7488.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */ 7489/* File: armv5te/OP_IGET_OBJECT_QUICK.S */ 7490/* File: armv5te/OP_IGET_QUICK.S */ 7491 /* For: iget-quick, iget-object-quick */ 7492 /* op vA, vB, offset@CCCC */ 7493 mov r2, rINST, lsr #12 @ r2<- B 7494 GET_VREG(r3, r2) @ r3<- object we're operating on 7495 FETCH(r1, 1) @ r1<- field byte offset 7496 cmp r3, #0 @ check object for null 7497 mov r2, rINST, lsr #8 @ r2<- A(+) 7498 beq common_errNullObject @ object was null 7499 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7500 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7501 and r2, r2, #15 7502 GET_INST_OPCODE(ip) @ extract opcode from rINST 7503 SET_VREG(r0, r2) @ fp[A]<- r0 7504 GOTO_OPCODE(ip) @ jump to next instruction 7505 7506 7507/* ------------------------------ */ 7508 .balign 64 7509.L_OP_IPUT_QUICK: /* 0xf5 */ 7510/* File: armv5te/OP_IPUT_QUICK.S */ 7511 /* For: iput-quick */ 7512 /* op vA, vB, offset@CCCC */ 7513 mov r2, rINST, lsr #12 @ r2<- B 7514 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7515 FETCH(r1, 1) @ r1<- field byte offset 7516 cmp r3, #0 @ check object for null 7517 mov r2, rINST, lsr #8 @ 
r2<- A(+) 7518 beq common_errNullObject @ object was null 7519 and r2, r2, #15 7520 GET_VREG(r0, r2) @ r0<- fp[A] 7521 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7522 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7523 GET_INST_OPCODE(ip) @ extract opcode from rINST 7524 GOTO_OPCODE(ip) @ jump to next instruction 7525 7526/* ------------------------------ */ 7527 .balign 64 7528.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7529/* File: armv5te/OP_IPUT_WIDE_QUICK.S */ 7530 /* iput-wide-quick vA, vB, offset@CCCC */ 7531 mov r0, rINST, lsr #8 @ r0<- A(+) 7532 mov r1, rINST, lsr #12 @ r1<- B 7533 and r0, r0, #15 7534 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7535 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7536 cmp r2, #0 @ check object for null 7537 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7538 beq common_errNullObject @ object was null 7539 FETCH(r3, 1) @ r3<- field byte offset 7540 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7541 strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1 7542 GET_INST_OPCODE(ip) @ extract opcode from rINST 7543 GOTO_OPCODE(ip) @ jump to next instruction 7544 7545/* ------------------------------ */ 7546 .balign 64 7547.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7548/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7549 /* For: iput-object-quick */ 7550 /* op vA, vB, offset@CCCC */ 7551 mov r2, rINST, lsr #12 @ r2<- B 7552 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7553 FETCH(r1, 1) @ r1<- field byte offset 7554 cmp r3, #0 @ check object for null 7555 mov r2, rINST, lsr #8 @ r2<- A(+) 7556 beq common_errNullObject @ object was null 7557 and r2, r2, #15 7558 GET_VREG(r0, r2) @ r0<- fp[A] 7559 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 7560 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7561 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7562 cmp r0, #0 7563 strneb r2, [r2, r3, lsr #GC_CARD_SHIFT] @ mark card on non-null store 7564 GET_INST_OPCODE(ip) @ extract opcode from rINST 7565 GOTO_OPCODE(ip) @ jump to next instruction 
7566 7567/* ------------------------------ */ 7568 .balign 64 7569.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */ 7570/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7571 /* 7572 * Handle an optimized virtual method call. 7573 * 7574 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7575 */ 7576 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7577 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7578 FETCH(r3, 2) @ r3<- FEDC or CCCC 7579 FETCH(r1, 1) @ r1<- BBBB 7580 .if (!0) 7581 and r3, r3, #15 @ r3<- C (or stays CCCC) 7582 .endif 7583 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7584 cmp r2, #0 @ is "this" null? 7585 beq common_errNullObject @ null "this", throw exception 7586 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7587 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7588 EXPORT_PC() @ invoke must export 7589 ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB] 7590 bl common_invokeMethodNoRange @ continue on 7591 7592/* ------------------------------ */ 7593 .balign 64 7594.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */ 7595/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */ 7596/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7597 /* 7598 * Handle an optimized virtual method call. 7599 * 7600 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7601 */ 7602 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7603 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7604 FETCH(r3, 2) @ r3<- FEDC or CCCC 7605 FETCH(r1, 1) @ r1<- BBBB 7606 .if (!1) 7607 and r3, r3, #15 @ r3<- C (or stays CCCC) 7608 .endif 7609 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7610 cmp r2, #0 @ is "this" null? 
7611 beq common_errNullObject @ null "this", throw exception 7612 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7613 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7614 EXPORT_PC() @ invoke must export 7615 ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB] 7616 bl common_invokeMethodRange @ continue on 7617 7618 7619/* ------------------------------ */ 7620 .balign 64 7621.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */ 7622/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7623 /* 7624 * Handle an optimized "super" method call. 7625 * 7626 * for: [opt] invoke-super-quick, invoke-super-quick/range 7627 */ 7628 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7629 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7630 FETCH(r10, 2) @ r10<- GFED or CCCC 7631 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7632 .if (!0) 7633 and r10, r10, #15 @ r10<- D (or stays CCCC) 7634 .endif 7635 FETCH(r1, 1) @ r1<- BBBB 7636 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7637 EXPORT_PC() @ must export for invoke 7638 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7639 GET_VREG(r3, r10) @ r3<- "this" 7640 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7641 cmp r3, #0 @ null "this" ref? 7642 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7643 beq common_errNullObject @ "this" is null, throw exception 7644 bl common_invokeMethodNoRange @ continue on 7645 7646/* ------------------------------ */ 7647 .balign 64 7648.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */ 7649/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */ 7650/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7651 /* 7652 * Handle an optimized "super" method call. 
7653 * 7654 * for: [opt] invoke-super-quick, invoke-super-quick/range 7655 */ 7656 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7657 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7658 FETCH(r10, 2) @ r10<- GFED or CCCC 7659 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7660 .if (!1) 7661 and r10, r10, #15 @ r10<- D (or stays CCCC) 7662 .endif 7663 FETCH(r1, 1) @ r1<- BBBB 7664 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7665 EXPORT_PC() @ must export for invoke 7666 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7667 GET_VREG(r3, r10) @ r3<- "this" 7668 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7669 cmp r3, #0 @ null "this" ref? 7670 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7671 beq common_errNullObject @ "this" is null, throw exception 7672 bl common_invokeMethodRange @ continue on 7673 7674 7675/* ------------------------------ */ 7676 .balign 64 7677.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */ 7678/* File: armv5te/OP_IPUT_OBJECT_VOLATILE.S */ 7679/* File: armv5te/OP_IPUT_OBJECT.S */ 7680 /* 7681 * 32-bit instance field put. 7682 * 7683 * for: iput-object, iput-object-volatile 7684 */ 7685 /* op vA, vB, field@CCCC */ 7686 mov r0, rINST, lsr #12 @ r0<- B 7687 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7688 FETCH(r1, 1) @ r1<- field ref CCCC 7689 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 7690 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7691 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7692 cmp r0, #0 @ is resolved entry null? 7693 bne .LOP_IPUT_OBJECT_VOLATILE_finish @ no, already resolved 76948: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7695 EXPORT_PC() @ resolve() could throw 7696 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7697 bl dvmResolveInstField @ r0<- resolved InstField ptr 7698 cmp r0, #0 @ success? 
7699 bne .LOP_IPUT_OBJECT_VOLATILE_finish @ yes, finish up 7700 b common_exceptionThrown 7701 7702 7703/* ------------------------------ */ 7704 .balign 64 7705.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */ 7706/* File: armv5te/OP_SGET_OBJECT_VOLATILE.S */ 7707/* File: armv5te/OP_SGET.S */ 7708 /* 7709 * General 32-bit SGET handler. 7710 * 7711 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 7712 */ 7713 /* op vAA, field@BBBB */ 7714 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7715 FETCH(r1, 1) @ r1<- field ref BBBB 7716 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7717 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7718 cmp r0, #0 @ is resolved entry null? 7719 beq .LOP_SGET_OBJECT_VOLATILE_resolve @ yes, do resolve 7720.LOP_SGET_OBJECT_VOLATILE_finish: @ field ptr in r0 7721 ldr r1, [r0, #offStaticField_value] @ r1<- field value 7722 SMP_DMB @ acquiring load 7723 mov r2, rINST, lsr #8 @ r2<- AA 7724 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7725 SET_VREG(r1, r2) @ fp[AA]<- r1 7726 GET_INST_OPCODE(ip) @ extract opcode from rINST 7727 GOTO_OPCODE(ip) @ jump to next instruction 7728 7729 7730/* ------------------------------ */ 7731 .balign 64 7732.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */ 7733/* File: armv5te/OP_SPUT_OBJECT_VOLATILE.S */ 7734/* File: armv5te/OP_SPUT_OBJECT.S */ 7735 /* 7736 * 32-bit SPUT handler for objects 7737 * 7738 * for: sput-object, sput-object-volatile 7739 */ 7740 /* op vAA, field@BBBB */ 7741 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7742 FETCH(r1, 1) @ r1<- field ref BBBB 7743 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7744 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7745 cmp r0, #0 @ is resolved entry null? 
7746 bne .LOP_SPUT_OBJECT_VOLATILE_finish @ no, continue 7747 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7748 EXPORT_PC() @ resolve() could throw, so export now 7749 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7750 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 7751 cmp r0, #0 @ success? 7752 bne .LOP_SPUT_OBJECT_VOLATILE_finish @ yes, finish 7753 b common_exceptionThrown @ no, handle exception 7754 7755 7756 7757/* ------------------------------ */ 7758 .balign 64 7759.L_OP_UNUSED_FF: /* 0xff */ 7760/* File: armv5te/OP_UNUSED_FF.S */ 7761/* File: armv5te/unused.S */ 7762 bl common_abort 7763 7764 7765 7766 .balign 64 7767 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 7768 .global dvmAsmInstructionEnd 7769dvmAsmInstructionEnd: 7770 7771/* 7772 * =========================================================================== 7773 * Sister implementations 7774 * =========================================================================== 7775 */ 7776 .global dvmAsmSisterStart 7777 .type dvmAsmSisterStart, %function 7778 .text 7779 .balign 4 7780dvmAsmSisterStart: 7781 7782/* continuation for OP_CONST_STRING */ 7783 7784 /* 7785 * Continuation if the String has not yet been resolved. 7786 * r1: BBBB (String ref) 7787 * r9: target register 7788 */ 7789.LOP_CONST_STRING_resolve: 7790 EXPORT_PC() 7791 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7792 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7793 bl dvmResolveString @ r0<- String reference 7794 cmp r0, #0 @ failed? 7795 beq common_exceptionThrown @ yup, handle the exception 7796 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7797 GET_INST_OPCODE(ip) @ extract opcode from rINST 7798 SET_VREG(r0, r9) @ vAA<- r0 7799 GOTO_OPCODE(ip) @ jump to next instruction 7800 7801/* continuation for OP_CONST_STRING_JUMBO */ 7802 7803 /* 7804 * Continuation if the String has not yet been resolved. 
7805 * r1: BBBBBBBB (String ref) 7806 * r9: target register 7807 */ 7808.LOP_CONST_STRING_JUMBO_resolve: 7809 EXPORT_PC() 7810 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7811 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7812 bl dvmResolveString @ r0<- String reference 7813 cmp r0, #0 @ failed? 7814 beq common_exceptionThrown @ yup, handle the exception 7815 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7816 GET_INST_OPCODE(ip) @ extract opcode from rINST 7817 SET_VREG(r0, r9) @ vAA<- r0 7818 GOTO_OPCODE(ip) @ jump to next instruction 7819 7820/* continuation for OP_CONST_CLASS */ 7821 7822 /* 7823 * Continuation if the Class has not yet been resolved. 7824 * r1: BBBB (Class ref) 7825 * r9: target register 7826 */ 7827.LOP_CONST_CLASS_resolve: 7828 EXPORT_PC() 7829 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7830 mov r2, #1 @ r2<- true 7831 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7832 bl dvmResolveClass @ r0<- Class reference 7833 cmp r0, #0 @ failed? 7834 beq common_exceptionThrown @ yup, handle the exception 7835 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7836 GET_INST_OPCODE(ip) @ extract opcode from rINST 7837 SET_VREG(r0, r9) @ vAA<- r0 7838 GOTO_OPCODE(ip) @ jump to next instruction 7839 7840/* continuation for OP_CHECK_CAST */ 7841 7842 /* 7843 * Trivial test failed, need to perform full check. This is common. 7844 * r0 holds obj->clazz 7845 * r1 holds class resolved from BBBB 7846 * r9 holds object 7847 */ 7848.LOP_CHECK_CAST_fullcheck: 7849 bl dvmInstanceofNonTrivial @ r0<- boolean result 7850 cmp r0, #0 @ failed? 7851 bne .LOP_CHECK_CAST_okay @ no, success 7852 7853 @ A cast has failed. We need to throw a ClassCastException with the 7854 @ class of the object that failed to be cast. 
7855 EXPORT_PC() @ about to throw 7856 ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz 7857 ldr r0, .LstrClassCastExceptionPtr 7858 ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor 7859 bl dvmThrowExceptionWithClassMessage 7860 b common_exceptionThrown 7861 7862 /* 7863 * Resolution required. This is the least-likely path. 7864 * 7865 * r2 holds BBBB 7866 * r9 holds object 7867 */ 7868.LOP_CHECK_CAST_resolve: 7869 EXPORT_PC() @ resolve() could throw 7870 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7871 mov r1, r2 @ r1<- BBBB 7872 mov r2, #0 @ r2<- false 7873 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7874 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7875 cmp r0, #0 @ got null? 7876 beq common_exceptionThrown @ yes, handle exception 7877 mov r1, r0 @ r1<- class resolved from BBB 7878 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 7879 b .LOP_CHECK_CAST_resolved @ pick up where we left off 7880 7881.LstrClassCastExceptionPtr: 7882 .word .LstrClassCastException 7883 7884/* continuation for OP_INSTANCE_OF */ 7885 7886 /* 7887 * Trivial test failed, need to perform full check. This is common. 7888 * r0 holds obj->clazz 7889 * r1 holds class resolved from BBBB 7890 * r9 holds A 7891 */ 7892.LOP_INSTANCE_OF_fullcheck: 7893 bl dvmInstanceofNonTrivial @ r0<- boolean result 7894 @ fall through to OP_INSTANCE_OF_store 7895 7896 /* 7897 * r0 holds boolean result 7898 * r9 holds A 7899 */ 7900.LOP_INSTANCE_OF_store: 7901 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7902 SET_VREG(r0, r9) @ vA<- r0 7903 GET_INST_OPCODE(ip) @ extract opcode from rINST 7904 GOTO_OPCODE(ip) @ jump to next instruction 7905 7906 /* 7907 * Trivial test succeeded, save and bail. 
7908 * r9 holds A 7909 */ 7910.LOP_INSTANCE_OF_trivial: 7911 mov r0, #1 @ indicate success 7912 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper 7913 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7914 SET_VREG(r0, r9) @ vA<- r0 7915 GET_INST_OPCODE(ip) @ extract opcode from rINST 7916 GOTO_OPCODE(ip) @ jump to next instruction 7917 7918 /* 7919 * Resolution required. This is the least-likely path. 7920 * 7921 * r3 holds BBBB 7922 * r9 holds A 7923 */ 7924.LOP_INSTANCE_OF_resolve: 7925 EXPORT_PC() @ resolve() could throw 7926 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7927 mov r1, r3 @ r1<- BBBB 7928 mov r2, #1 @ r2<- true 7929 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7930 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7931 cmp r0, #0 @ got null? 7932 beq common_exceptionThrown @ yes, handle exception 7933 mov r1, r0 @ r1<- class resolved from BBB 7934 mov r3, rINST, lsr #12 @ r3<- B 7935 GET_VREG(r0, r3) @ r0<- vB (object) 7936 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 7937 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 7938 7939/* continuation for OP_NEW_INSTANCE */ 7940 7941 .balign 32 @ minimize cache lines 7942.LOP_NEW_INSTANCE_finish: @ r0=new object 7943 mov r3, rINST, lsr #8 @ r3<- AA 7944 cmp r0, #0 @ failed? 7945 beq common_exceptionThrown @ yes, handle the exception 7946 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7947 GET_INST_OPCODE(ip) @ extract opcode from rINST 7948 SET_VREG(r0, r3) @ vAA<- r0 7949 GOTO_OPCODE(ip) @ jump to next instruction 7950 7951 /* 7952 * Class initialization required. 7953 * 7954 * r0 holds class object 7955 */ 7956.LOP_NEW_INSTANCE_needinit: 7957 mov r9, r0 @ save r0 7958 bl dvmInitClass @ initialize class 7959 cmp r0, #0 @ check boolean result 7960 mov r0, r9 @ restore r0 7961 bne .LOP_NEW_INSTANCE_initialized @ success, continue 7962 b common_exceptionThrown @ failed, deal with init exception 7963 7964 /* 7965 * Resolution required. 
This is the least-likely path. 7966 * 7967 * r1 holds BBBB 7968 */ 7969.LOP_NEW_INSTANCE_resolve: 7970 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7971 mov r2, #0 @ r2<- false 7972 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7973 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7974 cmp r0, #0 @ got null? 7975 bne .LOP_NEW_INSTANCE_resolved @ no, continue 7976 b common_exceptionThrown @ yes, handle exception 7977 7978.LstrInstantiationErrorPtr: 7979 .word .LstrInstantiationError 7980 7981/* continuation for OP_NEW_ARRAY */ 7982 7983 7984 /* 7985 * Resolve class. (This is an uncommon case.) 7986 * 7987 * r1 holds array length 7988 * r2 holds class ref CCCC 7989 */ 7990.LOP_NEW_ARRAY_resolve: 7991 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7992 mov r9, r1 @ r9<- length (save) 7993 mov r1, r2 @ r1<- CCCC 7994 mov r2, #0 @ r2<- false 7995 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7996 bl dvmResolveClass @ r0<- call(clazz, ref) 7997 cmp r0, #0 @ got null? 7998 mov r1, r9 @ r1<- length (restore) 7999 beq common_exceptionThrown @ yes, handle exception 8000 @ fall through to OP_NEW_ARRAY_finish 8001 8002 /* 8003 * Finish allocation. 8004 * 8005 * r0 holds class 8006 * r1 holds array length 8007 */ 8008.LOP_NEW_ARRAY_finish: 8009 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 8010 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 8011 cmp r0, #0 @ failed? 
8012 mov r2, rINST, lsr #8 @ r2<- A+ 8013 beq common_exceptionThrown @ yes, handle the exception 8014 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8015 and r2, r2, #15 @ r2<- A 8016 GET_INST_OPCODE(ip) @ extract opcode from rINST 8017 SET_VREG(r0, r2) @ vA<- r0 8018 GOTO_OPCODE(ip) @ jump to next instruction 8019 8020/* continuation for OP_FILLED_NEW_ARRAY */ 8021 8022 /* 8023 * On entry: 8024 * r0 holds array class 8025 * r10 holds AA or BA 8026 */ 8027.LOP_FILLED_NEW_ARRAY_continue: 8028 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8029 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8030 ldrb rINST, [r3, #1] @ rINST<- descriptor[1] 8031 .if 0 8032 mov r1, r10 @ r1<- AA (length) 8033 .else 8034 mov r1, r10, lsr #4 @ r1<- B (length) 8035 .endif 8036 cmp rINST, #'I' @ array of ints? 8037 cmpne rINST, #'L' @ array of objects? 8038 cmpne rINST, #'[' @ array of arrays? 8039 mov r9, r1 @ save length in r9 8040 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 8041 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8042 cmp r0, #0 @ null return? 8043 beq common_exceptionThrown @ alloc failed, handle exception 8044 8045 FETCH(r1, 2) @ r1<- FEDC or CCCC 8046 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8047 str rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type 8048 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8049 subs r9, r9, #1 @ length--, check for neg 8050 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8051 bmi 2f @ was zero, bail 8052 8053 @ copy values from registers into the array 8054 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8055 .if 0 8056 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 80571: ldr r3, [r2], #4 @ r3<- *r2++ 8058 subs r9, r9, #1 @ count-- 8059 str r3, [r0], #4 @ *contents++ = vX 8060 bpl 1b 8061 @ continue at 2 8062 .else 8063 cmp r9, #4 @ length was initially 5? 
8064 and r2, r10, #15 @ r2<- A 8065 bne 1f @ <= 4 args, branch 8066 GET_VREG(r3, r2) @ r3<- vA 8067 sub r9, r9, #1 @ count-- 8068 str r3, [r0, #16] @ contents[4] = vA 80691: and r2, r1, #15 @ r2<- F/E/D/C 8070 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8071 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8072 subs r9, r9, #1 @ count-- 8073 str r3, [r0], #4 @ *contents++ = vX 8074 bpl 1b 8075 @ continue at 2 8076 .endif 8077 80782: 8079 ldr r0, [rGLUE, #offGlue_retval] @ r0<- object 8080 ldr r1, [rGLUE, #offGlue_retval+4] @ r1<- type 8081 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8082 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8083 cmp r1, #'I' @ Is int array? 8084 strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card if not 8085 GOTO_OPCODE(ip) @ execute it 8086 8087 /* 8088 * Throw an exception indicating that we have not implemented this 8089 * mode of filled-new-array. 8090 */ 8091.LOP_FILLED_NEW_ARRAY_notimpl: 8092 ldr r0, .L_strInternalError 8093 ldr r1, .L_strFilledNewArrayNotImpl 8094 bl dvmThrowException 8095 b common_exceptionThrown 8096 8097 .if (!0) @ define in one or the other, not both 8098.L_strFilledNewArrayNotImpl: 8099 .word .LstrFilledNewArrayNotImpl 8100.L_strInternalError: 8101 .word .LstrInternalError 8102 .endif 8103 8104/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8105 8106 /* 8107 * On entry: 8108 * r0 holds array class 8109 * r10 holds AA or BA 8110 */ 8111.LOP_FILLED_NEW_ARRAY_RANGE_continue: 8112 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8113 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8114 ldrb rINST, [r3, #1] @ rINST<- descriptor[1] 8115 .if 1 8116 mov r1, r10 @ r1<- AA (length) 8117 .else 8118 mov r1, r10, lsr #4 @ r1<- B (length) 8119 .endif 8120 cmp rINST, #'I' @ array of ints? 8121 cmpne rINST, #'L' @ array of objects? 8122 cmpne rINST, #'[' @ array of arrays? 
8123 mov r9, r1 @ save length in r9 8124 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8125 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8126 cmp r0, #0 @ null return? 8127 beq common_exceptionThrown @ alloc failed, handle exception 8128 8129 FETCH(r1, 2) @ r1<- FEDC or CCCC 8130 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8131 str rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type 8132 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8133 subs r9, r9, #1 @ length--, check for neg 8134 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8135 bmi 2f @ was zero, bail 8136 8137 @ copy values from registers into the array 8138 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8139 .if 1 8140 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 81411: ldr r3, [r2], #4 @ r3<- *r2++ 8142 subs r9, r9, #1 @ count-- 8143 str r3, [r0], #4 @ *contents++ = vX 8144 bpl 1b 8145 @ continue at 2 8146 .else 8147 cmp r9, #4 @ length was initially 5? 8148 and r2, r10, #15 @ r2<- A 8149 bne 1f @ <= 4 args, branch 8150 GET_VREG(r3, r2) @ r3<- vA 8151 sub r9, r9, #1 @ count-- 8152 str r3, [r0, #16] @ contents[4] = vA 81531: and r2, r1, #15 @ r2<- F/E/D/C 8154 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8155 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8156 subs r9, r9, #1 @ count-- 8157 str r3, [r0], #4 @ *contents++ = vX 8158 bpl 1b 8159 @ continue at 2 8160 .endif 8161 81622: 8163 ldr r0, [rGLUE, #offGlue_retval] @ r0<- object 8164 ldr r1, [rGLUE, #offGlue_retval+4] @ r1<- type 8165 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8166 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8167 cmp r1, #'I' @ Is int array? 8168 strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card if not 8169 GOTO_OPCODE(ip) @ execute it 8170 8171 /* 8172 * Throw an exception indicating that we have not implemented this 8173 * mode of filled-new-array. 
 */
.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
    @ Throw InternalError with the "filled-new-array not implemented" message;
    @ reached when the element type was not 'I', 'L' or '['.
    ldr     r0, .L_strInternalError
    ldr     r1, .L_strFilledNewArrayNotImpl
    bl      dvmThrowException
    b       common_exceptionThrown

    @ Literal-pool entries for the message strings.  The guard is false here
    @ (!1): the non-range filled-new-array handler already emitted these
    @ labels, and they must only be defined once per file.
    .if (!1)                            @ define in one or the other, not both
.L_strFilledNewArrayNotImpl:
    .word   .LstrFilledNewArrayNotImpl
.L_strInternalError:
    .word   .LstrInternalError
    .endif

/* continuation for OP_CMPL_FLOAT */
.LOP_CMPL_FLOAT_finish:
    @ r0 holds the -1/0/1 comparison result and ip already holds the next
    @ opcode (GOTO_OPCODE uses it directly; set up before branching here).
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CMPG_FLOAT */
.LOP_CMPG_FLOAT_finish:
    @ Same shape as CMPL_FLOAT above: store result, dispatch.
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CMPL_DOUBLE */
.LOP_CMPL_DOUBLE_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CMPG_DOUBLE */
.LOP_CMPG_DOUBLE_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CMP_LONG */

.LOP_CMP_LONG_less:
    @ vBB < vCC: result is -1.
    mvn     r1, #0                      @ r1<- -1
    @ Want to cond code the next mov so we can avoid branch, but don't see it;
    @ instead, we just replicate the tail end.
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LOP_CMP_LONG_greater:
    @ vBB > vCC: result is +1; share the common tail below.
    mov     r1, #1                      @ r1<- 1
    @ fall through to _finish

.LOP_CMP_LONG_finish:
    @ Common tail: r1 holds the comparison result, r9 holds AA.
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_AGET_WIDE */

.LOP_AGET_WIDE_finish:
    @ Load a 64-bit element and store it into the vAA/vAA+1 register pair.
    @ r0 presumably points at the selected element (array base + index
    @ scaled), biased so that #offArrayObject_contents lands on the data --
    @ set up by the main handler; confirm there.
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrd    r2, [r0, #offArrayObject_contents]  @ r2/r3<- vBB[vCC]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_APUT_WIDE */

.LOP_APUT_WIDE_finish:
    @ Mirror of AGET_WIDE: read vAA/vAA+1 (r9 = &fp[AA]) and store 64 bits
    @ into the array element addressed via r0.
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strd    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_APUT_OBJECT */
    /*
     * On entry:
     *  r1 = vBB (arrayObj)
     *  r9 = vAA (obj)
     *  r10 = offset into array (vBB + vCC * width)
     */
.LOP_APUT_OBJECT_finish:
    @ A null store needs no type check; otherwise verify that the object's
    @ class is assignable to the array's element type.
    cmp     r9, #0                      @ storing null reference?
    beq     .LOP_APUT_OBJECT_skip_check @ yes, skip type checks
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    ldr     r1, [r1, #offObject_clazz]  @ r1<- arrayObj->clazz
    bl      dvmCanPutArrayElement       @ test object type vs. array type
    cmp     r0, #0                      @ okay?
8262 beq common_errArrayStore @ no 8263 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8264 ldr r2, [rGLUE, #offGlue_cardTable] @ get biased CT base 8265 add r10, #offArrayObject_contents @ r0<- pointer to slot 8266 GET_INST_OPCODE(ip) @ extract opcode from rINST 8267 str r9, [r10] @ vBB[vCC]<- vAA 8268 strb r2, [r2, r10, lsr #GC_CARD_SHIFT] @ mark card 8269 GOTO_OPCODE(ip) @ jump to next instruction 8270.LOP_APUT_OBJECT_skip_check: 8271 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8272 GET_INST_OPCODE(ip) @ extract opcode from rINST 8273 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8274 GOTO_OPCODE(ip) @ jump to next instruction 8275 8276/* continuation for OP_IGET */ 8277 8278 /* 8279 * Currently: 8280 * r0 holds resolved field 8281 * r9 holds object 8282 */ 8283.LOP_IGET_finish: 8284 @bl common_squeak0 8285 cmp r9, #0 @ check object for null 8286 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8287 beq common_errNullObject @ object was null 8288 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8289 @ no-op @ acquiring load 8290 mov r2, rINST, lsr #8 @ r2<- A+ 8291 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8292 and r2, r2, #15 @ r2<- A 8293 GET_INST_OPCODE(ip) @ extract opcode from rINST 8294 SET_VREG(r0, r2) @ fp[A]<- r0 8295 GOTO_OPCODE(ip) @ jump to next instruction 8296 8297/* continuation for OP_IGET_WIDE */ 8298 8299 /* 8300 * Currently: 8301 * r0 holds resolved field 8302 * r9 holds object 8303 */ 8304.LOP_IGET_WIDE_finish: 8305 cmp r9, #0 @ check object for null 8306 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8307 beq common_errNullObject @ object was null 8308 .if 0 8309 add r0, r9, r3 @ r0<- address of field 8310 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 8311 .else 8312 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 8313 .endif 8314 mov r2, rINST, lsr #8 @ r2<- A+ 8315 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8316 and r2, r2, #15 @ r2<- A 8317 add r3, rFP, r2, lsl 
#2 @ r3<- &fp[A] 8318 GET_INST_OPCODE(ip) @ extract opcode from rINST 8319 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8320 GOTO_OPCODE(ip) @ jump to next instruction 8321 8322/* continuation for OP_IGET_OBJECT */ 8323 8324 /* 8325 * Currently: 8326 * r0 holds resolved field 8327 * r9 holds object 8328 */ 8329.LOP_IGET_OBJECT_finish: 8330 @bl common_squeak0 8331 cmp r9, #0 @ check object for null 8332 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8333 beq common_errNullObject @ object was null 8334 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8335 @ no-op @ acquiring load 8336 mov r2, rINST, lsr #8 @ r2<- A+ 8337 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8338 and r2, r2, #15 @ r2<- A 8339 GET_INST_OPCODE(ip) @ extract opcode from rINST 8340 SET_VREG(r0, r2) @ fp[A]<- r0 8341 GOTO_OPCODE(ip) @ jump to next instruction 8342 8343/* continuation for OP_IGET_BOOLEAN */ 8344 8345 /* 8346 * Currently: 8347 * r0 holds resolved field 8348 * r9 holds object 8349 */ 8350.LOP_IGET_BOOLEAN_finish: 8351 @bl common_squeak1 8352 cmp r9, #0 @ check object for null 8353 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8354 beq common_errNullObject @ object was null 8355 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8356 @ no-op @ acquiring load 8357 mov r2, rINST, lsr #8 @ r2<- A+ 8358 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8359 and r2, r2, #15 @ r2<- A 8360 GET_INST_OPCODE(ip) @ extract opcode from rINST 8361 SET_VREG(r0, r2) @ fp[A]<- r0 8362 GOTO_OPCODE(ip) @ jump to next instruction 8363 8364/* continuation for OP_IGET_BYTE */ 8365 8366 /* 8367 * Currently: 8368 * r0 holds resolved field 8369 * r9 holds object 8370 */ 8371.LOP_IGET_BYTE_finish: 8372 @bl common_squeak2 8373 cmp r9, #0 @ check object for null 8374 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8375 beq common_errNullObject @ object was null 8376 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8377 @ no-op @ acquiring load 8378 mov r2, rINST, 
lsr #8 @ r2<- A+ 8379 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8380 and r2, r2, #15 @ r2<- A 8381 GET_INST_OPCODE(ip) @ extract opcode from rINST 8382 SET_VREG(r0, r2) @ fp[A]<- r0 8383 GOTO_OPCODE(ip) @ jump to next instruction 8384 8385/* continuation for OP_IGET_CHAR */ 8386 8387 /* 8388 * Currently: 8389 * r0 holds resolved field 8390 * r9 holds object 8391 */ 8392.LOP_IGET_CHAR_finish: 8393 @bl common_squeak3 8394 cmp r9, #0 @ check object for null 8395 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8396 beq common_errNullObject @ object was null 8397 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8398 @ no-op @ acquiring load 8399 mov r2, rINST, lsr #8 @ r2<- A+ 8400 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8401 and r2, r2, #15 @ r2<- A 8402 GET_INST_OPCODE(ip) @ extract opcode from rINST 8403 SET_VREG(r0, r2) @ fp[A]<- r0 8404 GOTO_OPCODE(ip) @ jump to next instruction 8405 8406/* continuation for OP_IGET_SHORT */ 8407 8408 /* 8409 * Currently: 8410 * r0 holds resolved field 8411 * r9 holds object 8412 */ 8413.LOP_IGET_SHORT_finish: 8414 @bl common_squeak4 8415 cmp r9, #0 @ check object for null 8416 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8417 beq common_errNullObject @ object was null 8418 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8419 @ no-op @ acquiring load 8420 mov r2, rINST, lsr #8 @ r2<- A+ 8421 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8422 and r2, r2, #15 @ r2<- A 8423 GET_INST_OPCODE(ip) @ extract opcode from rINST 8424 SET_VREG(r0, r2) @ fp[A]<- r0 8425 GOTO_OPCODE(ip) @ jump to next instruction 8426 8427/* continuation for OP_IPUT */ 8428 8429 /* 8430 * Currently: 8431 * r0 holds resolved field 8432 * r9 holds object 8433 */ 8434.LOP_IPUT_finish: 8435 @bl common_squeak0 8436 mov r1, rINST, lsr #8 @ r1<- A+ 8437 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8438 and r1, r1, #15 @ r1<- A 8439 cmp r9, #0 @ check object for null 8440 GET_VREG(r0, r1) @ 
r0<- fp[A] 8441 beq common_errNullObject @ object was null 8442 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8443 GET_INST_OPCODE(ip) @ extract opcode from rINST 8444 @ no-op @ releasing store 8445 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8446 GOTO_OPCODE(ip) @ jump to next instruction 8447 8448/* continuation for OP_IPUT_WIDE */ 8449 8450 /* 8451 * Currently: 8452 * r0 holds resolved field 8453 * r9 holds object 8454 */ 8455.LOP_IPUT_WIDE_finish: 8456 mov r2, rINST, lsr #8 @ r2<- A+ 8457 cmp r9, #0 @ check object for null 8458 and r2, r2, #15 @ r2<- A 8459 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8460 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8461 beq common_errNullObject @ object was null 8462 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8463 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8464 GET_INST_OPCODE(r10) @ extract opcode from rINST 8465 .if 0 8466 add r2, r9, r3 @ r2<- target address 8467 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 8468 .else 8469 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1 8470 .endif 8471 GOTO_OPCODE(r10) @ jump to next instruction 8472 8473/* continuation for OP_IPUT_OBJECT */ 8474 8475 /* 8476 * Currently: 8477 * r0 holds resolved field 8478 * r9 holds object 8479 */ 8480.LOP_IPUT_OBJECT_finish: 8481 @bl common_squeak0 8482 mov r1, rINST, lsr #8 @ r1<- A+ 8483 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8484 and r1, r1, #15 @ r1<- A 8485 cmp r9, #0 @ check object for null 8486 GET_VREG(r0, r1) @ r0<- fp[A] 8487 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8488 beq common_errNullObject @ object was null 8489 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8490 add r9, r3 @ r9<- direct ptr to target location 8491 GET_INST_OPCODE(ip) @ extract opcode from rINST 8492 @ no-op @ releasing store 8493 str r0, [r9] @ obj.field (8/16/32 bits)<- r0 8494 cmp r0, #0 @ stored a null reference? 
8495 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not 8496 GOTO_OPCODE(ip) @ jump to next instruction 8497 8498/* continuation for OP_IPUT_BOOLEAN */ 8499 8500 /* 8501 * Currently: 8502 * r0 holds resolved field 8503 * r9 holds object 8504 */ 8505.LOP_IPUT_BOOLEAN_finish: 8506 @bl common_squeak1 8507 mov r1, rINST, lsr #8 @ r1<- A+ 8508 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8509 and r1, r1, #15 @ r1<- A 8510 cmp r9, #0 @ check object for null 8511 GET_VREG(r0, r1) @ r0<- fp[A] 8512 beq common_errNullObject @ object was null 8513 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8514 GET_INST_OPCODE(ip) @ extract opcode from rINST 8515 @ no-op @ releasing store 8516 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8517 GOTO_OPCODE(ip) @ jump to next instruction 8518 8519/* continuation for OP_IPUT_BYTE */ 8520 8521 /* 8522 * Currently: 8523 * r0 holds resolved field 8524 * r9 holds object 8525 */ 8526.LOP_IPUT_BYTE_finish: 8527 @bl common_squeak2 8528 mov r1, rINST, lsr #8 @ r1<- A+ 8529 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8530 and r1, r1, #15 @ r1<- A 8531 cmp r9, #0 @ check object for null 8532 GET_VREG(r0, r1) @ r0<- fp[A] 8533 beq common_errNullObject @ object was null 8534 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8535 GET_INST_OPCODE(ip) @ extract opcode from rINST 8536 @ no-op @ releasing store 8537 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8538 GOTO_OPCODE(ip) @ jump to next instruction 8539 8540/* continuation for OP_IPUT_CHAR */ 8541 8542 /* 8543 * Currently: 8544 * r0 holds resolved field 8545 * r9 holds object 8546 */ 8547.LOP_IPUT_CHAR_finish: 8548 @bl common_squeak3 8549 mov r1, rINST, lsr #8 @ r1<- A+ 8550 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8551 and r1, r1, #15 @ r1<- A 8552 cmp r9, #0 @ check object for null 8553 GET_VREG(r0, r1) @ r0<- fp[A] 8554 beq common_errNullObject @ object was null 8555 FETCH_ADVANCE_INST(2) @ advance rPC, load 
rINST 8556 GET_INST_OPCODE(ip) @ extract opcode from rINST 8557 @ no-op @ releasing store 8558 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8559 GOTO_OPCODE(ip) @ jump to next instruction 8560 8561/* continuation for OP_IPUT_SHORT */ 8562 8563 /* 8564 * Currently: 8565 * r0 holds resolved field 8566 * r9 holds object 8567 */ 8568.LOP_IPUT_SHORT_finish: 8569 @bl common_squeak4 8570 mov r1, rINST, lsr #8 @ r1<- A+ 8571 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8572 and r1, r1, #15 @ r1<- A 8573 cmp r9, #0 @ check object for null 8574 GET_VREG(r0, r1) @ r0<- fp[A] 8575 beq common_errNullObject @ object was null 8576 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8577 GET_INST_OPCODE(ip) @ extract opcode from rINST 8578 @ no-op @ releasing store 8579 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8580 GOTO_OPCODE(ip) @ jump to next instruction 8581 8582/* continuation for OP_SGET */ 8583 8584 /* 8585 * Continuation if the field has not yet been resolved. 8586 * r1: BBBB field ref 8587 */ 8588.LOP_SGET_resolve: 8589 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8590 EXPORT_PC() @ resolve() could throw, so export now 8591 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8592 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8593 cmp r0, #0 @ success? 8594 bne .LOP_SGET_finish @ yes, finish 8595 b common_exceptionThrown @ no, handle exception 8596 8597/* continuation for OP_SGET_WIDE */ 8598 8599 /* 8600 * Continuation if the field has not yet been resolved. 8601 * r1: BBBB field ref 8602 * 8603 * Returns StaticField pointer in r0. 8604 */ 8605.LOP_SGET_WIDE_resolve: 8606 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8607 EXPORT_PC() @ resolve() could throw, so export now 8608 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8609 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8610 cmp r0, #0 @ success? 
8611 bne .LOP_SGET_WIDE_finish @ yes, finish 8612 b common_exceptionThrown @ no, handle exception 8613 8614/* continuation for OP_SGET_OBJECT */ 8615 8616 /* 8617 * Continuation if the field has not yet been resolved. 8618 * r1: BBBB field ref 8619 */ 8620.LOP_SGET_OBJECT_resolve: 8621 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8622 EXPORT_PC() @ resolve() could throw, so export now 8623 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8624 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8625 cmp r0, #0 @ success? 8626 bne .LOP_SGET_OBJECT_finish @ yes, finish 8627 b common_exceptionThrown @ no, handle exception 8628 8629/* continuation for OP_SGET_BOOLEAN */ 8630 8631 /* 8632 * Continuation if the field has not yet been resolved. 8633 * r1: BBBB field ref 8634 */ 8635.LOP_SGET_BOOLEAN_resolve: 8636 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8637 EXPORT_PC() @ resolve() could throw, so export now 8638 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8639 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8640 cmp r0, #0 @ success? 8641 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 8642 b common_exceptionThrown @ no, handle exception 8643 8644/* continuation for OP_SGET_BYTE */ 8645 8646 /* 8647 * Continuation if the field has not yet been resolved. 8648 * r1: BBBB field ref 8649 */ 8650.LOP_SGET_BYTE_resolve: 8651 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8652 EXPORT_PC() @ resolve() could throw, so export now 8653 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8654 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8655 cmp r0, #0 @ success? 8656 bne .LOP_SGET_BYTE_finish @ yes, finish 8657 b common_exceptionThrown @ no, handle exception 8658 8659/* continuation for OP_SGET_CHAR */ 8660 8661 /* 8662 * Continuation if the field has not yet been resolved. 
8663 * r1: BBBB field ref 8664 */ 8665.LOP_SGET_CHAR_resolve: 8666 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8667 EXPORT_PC() @ resolve() could throw, so export now 8668 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8669 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8670 cmp r0, #0 @ success? 8671 bne .LOP_SGET_CHAR_finish @ yes, finish 8672 b common_exceptionThrown @ no, handle exception 8673 8674/* continuation for OP_SGET_SHORT */ 8675 8676 /* 8677 * Continuation if the field has not yet been resolved. 8678 * r1: BBBB field ref 8679 */ 8680.LOP_SGET_SHORT_resolve: 8681 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8682 EXPORT_PC() @ resolve() could throw, so export now 8683 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8684 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8685 cmp r0, #0 @ success? 8686 bne .LOP_SGET_SHORT_finish @ yes, finish 8687 b common_exceptionThrown @ no, handle exception 8688 8689/* continuation for OP_SPUT */ 8690 8691 /* 8692 * Continuation if the field has not yet been resolved. 8693 * r1: BBBB field ref 8694 */ 8695.LOP_SPUT_resolve: 8696 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8697 EXPORT_PC() @ resolve() could throw, so export now 8698 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8699 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8700 cmp r0, #0 @ success? 8701 bne .LOP_SPUT_finish @ yes, finish 8702 b common_exceptionThrown @ no, handle exception 8703 8704/* continuation for OP_SPUT_WIDE */ 8705 8706 /* 8707 * Continuation if the field has not yet been resolved. 8708 * r1: BBBB field ref 8709 * r9: &fp[AA] 8710 * 8711 * Returns StaticField pointer in r2. 8712 */ 8713.LOP_SPUT_WIDE_resolve: 8714 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8715 EXPORT_PC() @ resolve() could throw, so export now 8716 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8717 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8718 cmp r0, #0 @ success? 
    mov     r2, r0                      @ copy to r2 (finish expects ptr in r2)
    bne     .LOP_SPUT_WIDE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_OBJECT */
.LOP_SPUT_OBJECT_finish:   @ field ptr in r0
    @ Store vAA into the static field, then mark the GC card for the field's
    @ location unless a null reference was stored.
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    ldr     r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    add     r0, #offStaticField_value   @ r0<- pointer to store target
    @ no-op                             @ releasing store
    str     r1, [r0]                    @ field<- vAA
    cmp     r1, #0                      @ stored a null object?
    strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT]    @ mark card if not
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SPUT_BOOLEAN */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_BOOLEAN_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_BOOLEAN_finish    @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_BYTE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_BYTE_resolve:
    @ Identical shape to SPUT_BOOLEAN_resolve above (generated code).
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_BYTE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_CHAR */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_CHAR_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_CHAR_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_SHORT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_SHORT_resolve:
    @ Identical shape to SPUT_CHAR_resolve above (generated code).
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_SHORT_finish      @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_INVOKE_VIRTUAL */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
     */
.LOP_INVOKE_VIRTUAL_continue:
    @ Null-check "this", then select the actual method from the receiver's
    @ vtable using the base method's index (virtual dispatch).
    GET_VREG(r1, r10)                   @ r1<- "this" ptr
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    cmp     r1, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodNoRange  @ continue on

/* continuation for OP_INVOKE_SUPER */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r9 = method->clazz
     */
.LOP_INVOKE_SUPER_continue:
    @ Dispatch through the *superclass* vtable; bail to _nsm if the index is
    @ out of range for the superclass (method not present there).
    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_nsm       @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodNoRange  @ continue on

.LOP_INVOKE_SUPER_resolve:
    @ Resolve the method reference, then re-enter _continue on success.
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_continue  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     * r0 = resolved base method
     */
.LOP_INVOKE_SUPER_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod

/* continuation for OP_INVOKE_DIRECT */

    /*
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_resolve:
    @ Resolve a direct (private/constructor) method, then re-enter _finish
    @ with the "this" pointer reloaded into r2.
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_finish   @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* continuation for OP_INVOKE_VIRTUAL_RANGE */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
     */
.LOP_INVOKE_VIRTUAL_RANGE_continue:
    @ Range variant of INVOKE_VIRTUAL_continue: null-check "this", then
    @ select the real method from the receiver's vtable.
    GET_VREG(r1, r10)                   @ r1<- "this" ptr
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    cmp     r1, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange @ continue on

/* continuation for OP_INVOKE_SUPER_RANGE */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r9 = method->clazz
     */
.LOP_INVOKE_SUPER_RANGE_continue:
    @ Range variant of INVOKE_SUPER_continue: dispatch via the superclass
    @ vtable, guarding the method index against super->vtableCount.
    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange @ continue on

.LOP_INVOKE_SUPER_RANGE_resolve:
    @ Resolve the method reference, then re-enter _continue on success.
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_RANGE_continue    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     * r0 = resolved base method
     */
.LOP_INVOKE_SUPER_RANGE_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod

/* continuation for OP_INVOKE_DIRECT_RANGE */

    /*
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_RANGE_resolve:
    @ Range variant of INVOKE_DIRECT_resolve: resolve, reload "this" into
    @ r2, re-enter _finish on success.
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* continuation for OP_FLOAT_TO_LONG */
/*
 * Convert the float in r0 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
f2l_doconv:
    stmfd   sp!, {r4, lr}
    mov     r1, #0x5f000000             @ (float)maxlong
    mov     r4, r0                      @ r4<- saved copy of the argument
    bl      __aeabi_fcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff):
    mvnne   r1, #0x80000000             @  low word ffffffff, high 7fffffff
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, #0xdf000000             @ (float)minlong
    bl      __aeabi_fcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (8000000000000000)
    movne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}

    @ NaN check: NaN is the only value not equal to itself.
    mov     r0, r4                      @ recover arg
    mov     r1, r4
    bl      __aeabi_fcmpeq              @ is arg == self?
8962 cmp r0, #0 @ zero == no 8963 moveq r1, #0 @ return zero for NaN 8964 ldmeqfd sp!, {r4, pc} 8965 8966 mov r0, r4 @ recover arg 8967 bl __aeabi_f2lz @ convert float to long 8968 ldmfd sp!, {r4, pc} 8969 8970/* continuation for OP_DOUBLE_TO_LONG */ 8971/* 8972 * Convert the double in r0/r1 to a long in r0/r1. 8973 * 8974 * We have to clip values to long min/max per the specification. The 8975 * expected common case is a "reasonable" value that converts directly 8976 * to modest integer. The EABI convert function isn't doing this for us. 8977 */ 8978d2l_doconv: 8979 stmfd sp!, {r4, r5, lr} @ save regs 8980 mov r3, #0x43000000 @ maxlong, as a double (high word) 8981 add r3, #0x00e00000 @ 0x43e00000 8982 mov r2, #0 @ maxlong, as a double (low word) 8983 sub sp, sp, #4 @ align for EABI 8984 mov r4, r0 @ save a copy of r0 8985 mov r5, r1 @ and r1 8986 bl __aeabi_dcmpge @ is arg >= maxlong? 8987 cmp r0, #0 @ nonzero == yes 8988 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 8989 mvnne r1, #0x80000000 8990 bne 1f 8991 8992 mov r0, r4 @ recover arg 8993 mov r1, r5 8994 mov r3, #0xc3000000 @ minlong, as a double (high word) 8995 add r3, #0x00e00000 @ 0xc3e00000 8996 mov r2, #0 @ minlong, as a double (low word) 8997 bl __aeabi_dcmple @ is arg <= minlong? 8998 cmp r0, #0 @ nonzero == yes 8999 movne r0, #0 @ return minlong (8000000000000000) 9000 movne r1, #0x80000000 9001 bne 1f 9002 9003 mov r0, r4 @ recover arg 9004 mov r1, r5 9005 mov r2, r4 @ compare against self 9006 mov r3, r5 9007 bl __aeabi_dcmpeq @ is arg == self? 
9008 cmp r0, #0 @ zero == no 9009 moveq r1, #0 @ return zero for NaN 9010 beq 1f 9011 9012 mov r0, r4 @ recover arg 9013 mov r1, r5 9014 bl __aeabi_d2lz @ convert double to long 9015 90161: 9017 add sp, sp, #4 9018 ldmfd sp!, {r4, r5, pc} 9019 9020/* continuation for OP_MUL_LONG */ 9021 9022.LOP_MUL_LONG_finish: 9023 GET_INST_OPCODE(ip) @ extract opcode from rINST 9024 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 9025 GOTO_OPCODE(ip) @ jump to next instruction 9026 9027/* continuation for OP_SHL_LONG */ 9028 9029.LOP_SHL_LONG_finish: 9030 mov r0, r0, asl r2 @ r0<- r0 << r2 9031 GET_INST_OPCODE(ip) @ extract opcode from rINST 9032 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9033 GOTO_OPCODE(ip) @ jump to next instruction 9034 9035/* continuation for OP_SHR_LONG */ 9036 9037.LOP_SHR_LONG_finish: 9038 mov r1, r1, asr r2 @ r1<- r1 >> r2 9039 GET_INST_OPCODE(ip) @ extract opcode from rINST 9040 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9041 GOTO_OPCODE(ip) @ jump to next instruction 9042 9043/* continuation for OP_USHR_LONG */ 9044 9045.LOP_USHR_LONG_finish: 9046 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 9047 GET_INST_OPCODE(ip) @ extract opcode from rINST 9048 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9049 GOTO_OPCODE(ip) @ jump to next instruction 9050 9051/* continuation for OP_SHL_LONG_2ADDR */ 9052 9053.LOP_SHL_LONG_2ADDR_finish: 9054 GET_INST_OPCODE(ip) @ extract opcode from rINST 9055 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9056 GOTO_OPCODE(ip) @ jump to next instruction 9057 9058/* continuation for OP_SHR_LONG_2ADDR */ 9059 9060.LOP_SHR_LONG_2ADDR_finish: 9061 GET_INST_OPCODE(ip) @ extract opcode from rINST 9062 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9063 GOTO_OPCODE(ip) @ jump to next instruction 9064 9065/* continuation for OP_USHR_LONG_2ADDR */ 9066 9067.LOP_USHR_LONG_2ADDR_finish: 9068 GET_INST_OPCODE(ip) @ extract opcode from rINST 9069 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9070 GOTO_OPCODE(ip) @ jump to next instruction 9071 9072/* continuation for OP_IGET_VOLATILE */ 9073 9074 /* 
    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_VOLATILE_finish:
    @bl      common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    SMP_DMB                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_VOLATILE_finish:
    @bl      common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SGET_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_VOLATILE_finish   @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_VOLATILE_finish   @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_IGET_OBJECT_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_OBJECT_VOLATILE_finish:
    @bl      common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    SMP_DMB                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_WIDE_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_WIDE_VOLATILE_finish:
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    .if     1                           @ volatile: use quasi-atomic 64-bit read
    add     r0, r9, r3                  @ r0<- address of field
    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
    .else
    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
    .endif
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_WIDE_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_WIDE_VOLATILE_finish:
    mov     r2, rINST, lsr #8           @ r2<- A+
    cmp     r9, #0                      @ check object for null
    and     r2, r2, #15                 @ r2<- A
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[A]
    GET_INST_OPCODE(r10)                @ extract opcode from rINST
                                        @ (in r10: ip/lr are clobbered by the
                                        @  call below, r10 survives it)
    .if     1                           @ volatile: use quasi-atomic 64-bit swap
    add     r2, r9, r3                  @ r2<- target address
    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
    .else
    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
    .endif
    GOTO_OPCODE(r10)                    @ jump to next instruction

/* continuation for OP_SGET_WIDE_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     *
     * Returns StaticField pointer in r0.
     */
.LOP_SGET_WIDE_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_WIDE_VOLATILE_finish  @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_WIDE_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     *  r9: &fp[AA]
     *
     * Returns StaticField pointer in r2.
     */
.LOP_SPUT_WIDE_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    mov     r2, r0                      @ copy to r2 (flags unaffected by mov)
    bne     .LOP_SPUT_WIDE_VOLATILE_finish  @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_EXECUTE_INLINE */

    /*
     * Extract args, call function.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     *
     * The computed goto below depends on each numbered case being EXACTLY
     * two instructions (8 bytes) long; do not insert or remove instructions.
     *
     * Other ideas:
     * - Use a jump table from the main piece to jump directly into the
     *   AND/LDR pairs.  Costs a data load, saves a branch.
     * - Have five separate pieces that do the loading, so we can work the
     *   interleave a little better.  Increases code size.
     */
.LOP_EXECUTE_INLINE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- FEDC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  and     ip, r9, #0xf000             @ isolate F
    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vF (shift right 12, left 2)
3:  and     ip, r9, #0x0f00             @ isolate E
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vE
2:  and     ip, r9, #0x00f0             @ isolate D
    ldr     r1, [rFP, ip, lsr #2]       @ r1<- vD
1:  and     ip, r9, #0x000f             @ isolate C
    ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
0:
    ldr     r9, .LOP_EXECUTE_INLINE_table   @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_table:
    .word   gDvmInlineOpsTable

/* continuation for OP_EXECUTE_INLINE_RANGE */

    /*
     * Extract args, call function.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     *
     * As above: each numbered case must stay exactly two instructions long
     * for the computed goto to land correctly.
     */
.LOP_EXECUTE_INLINE_RANGE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- CCCC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  add     ip, r9, #3                  @ base+3
    GET_VREG(r3, ip)                    @ r3<- vBase[3]
3:  add     ip, r9, #2                  @ base+2
    GET_VREG(r2, ip)                    @ r2<- vBase[2]
2:  add     ip, r9, #1                  @ base+1
    GET_VREG(r1, ip)                    @ r1<- vBase[1]
1:  add     ip, r9, #0                  @ (nop)
    GET_VREG(r0, ip)                    @ r0<- vBase[0]
0:
    ldr     r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_RANGE_table:
    .word   gDvmInlineOpsTable

/* continuation for OP_IPUT_OBJECT_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_OBJECT_VOLATILE_finish:
    @bl      common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    ldr     r2, [rGLUE, #offGlue_cardTable]  @ r2<- card table base
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r9, r3                      @ r9<- direct ptr to target location
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store
    str     r0, [r9]                    @ obj.field (8/16/32 bits)<- r0
    cmp     r0, #0                      @ stored a null reference?
    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT]    @ mark card if not
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SGET_OBJECT_VOLATILE */
    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_OBJECT_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_OBJECT_VOLATILE_finish    @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_OBJECT_VOLATILE */
.LOP_SPUT_OBJECT_VOLATILE_finish:   @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    ldr     r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    add     r0, #offStaticField_value   @ r0<- pointer to store target
    SMP_DMB                             @ releasing store
    str     r1, [r0]                    @ field<- vAA
    cmp     r1, #0                      @ stored a null object?
    strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT]    @ mark card if not
    GOTO_OPCODE(ip)                     @ jump to next instruction

    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
    .global dvmAsmSisterEnd
dvmAsmSisterEnd:

/* File: armv5te/footer.S */

/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */

    .text
    .align  2

#if defined(WITH_JIT)
#if defined(WITH_SELF_VERIFICATION)
/*
 * Self-verification builds: all JIT->interpreter transfers route through
 * jitSVShadowRunEnd with the reason code in r2.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     r2,#kSVSPunt                @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache]    @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return

    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str     lr,[rGLUE,#offGlue_jitResumeNPC]
    str     r1,[rGLUE,#offGlue_jitResumeDPC]
    mov     r2,#kSVSSingleStep          @ r2<- interpreter entry point
    b       jitSVShadowRunEnd           @ doesn't return

    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     r0,rPC                      @ pass our target PC
    mov     r2,#kSVSTraceSelectNoChain  @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache]    @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return

    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    ldr     r0,[lr, #-1]                @ pass our target PC
                                        @ (#-1 compensates for thumb-bit in lr)
    mov     r2,#kSVSTraceSelect         @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache]    @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return

    .global dvmJitToInterpBackwardBranch
dvmJitToInterpBackwardBranch:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    ldr     r0,[lr, #-1]                @ pass our target PC
    mov     r2,#kSVSBackwardBranch      @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache]    @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return

    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    ldr     r0,[lr, #-1]                @ pass our target PC
    mov     r2,#kSVSNormal              @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache]    @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return

    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     r0,rPC                      @ pass our target PC
    mov     r2,#kSVSNoChain             @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache]    @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return
#else
/*
 * Return from the translation cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction. We have to skip
 * the code cache lookup otherwise it is possible to indefinitely bouce
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     rPC, r0
#if defined(WITH_JIT_TUNING)
    mov     r0,lr
    bl      dvmBumpPunt;
#endif
    EXPORT_PC()
    mov     r0, #0
    str     r0, [r10, #offThread_inJitCodeCache]    @ Back to the interp land
    adrl    rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *    r0 <= PC
 *    r1 <= PC of resume instruction
 *    lr <= resume point in translation
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str     lr,[rGLUE,#offGlue_jitResumeNPC]
    str     r1,[rGLUE,#offGlue_jitResumeDPC]
    mov     r1,#kInterpEntryInstr
    @ enum is 4 byte in aapcs-EABI
    str     r1, [rGLUE, #offGlue_entryPoint]
    mov     rPC,r0
    EXPORT_PC()

    adrl    rIBASE, dvmAsmInstructionStart
    mov     r2,#kJitSingleStep          @ Ask for single step and then revert
    str     r2,[rGLUE,#offGlue_jitState]
    mov     r1,#1                       @ set changeInterp to bail to debug interp
    b       common_gotoBail

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used for callees.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
#if defined(WITH_JIT_TUNING)
    bl      dvmBumpNoChain
#endif
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ Is there a translation?
    str     r0, [r10, #offThread_inJitCodeCache]    @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0,#0
    bxne    r0                          @ continue native execution if so
    b       2f

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.
 */
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr     rPC,[lr, #-1]               @ get our target PC
                                        @ (#-1 compensates for thumb-bit in lr)
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    add     rINST,lr,#-5                @ save start of chain branch
    add     rINST, #-4                  @ .. which is 9 bytes back
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ Is there a translation?
    str     r0, [r10, #offThread_inJitCodeCache]    @ set the inJitCodeCache flag
    cmp     r0,#0
    beq     2f
    mov     r1,rINST
    bl      dvmJitChain                 @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0,#0                       @ successful chain?
    bxne    r0                          @ continue native execution
    b       toInterpreter               @ didn't chain - resume with interpreter

/* No translation, so request one if profiling isn't disabled*/
2:
    adrl    rIBASE, dvmAsmInstructionStart
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp     r0, #0
    movne   r2,#kJitTSelectRequestHot   @ ask for trace selection
    bne     common_selectTrace
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return from the translation cache to the interpreter.
 * The return was done with a BLX from thumb mode, and
 * the following 32-bit word contains the target rPC value.
 * Note that lr (r14) will have its low-order bit set to denote
 * its thumb-mode origin.
 *
 * We'll need to stash our lr origin away, recover the new
 * target and then check to see if there is a translation available
 * for our new target.  If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr     rPC,[lr, #-1]               @ get our target PC
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    add     rINST,lr,#-5                @ save start of chain branch
    add     rINST,#-4                   @ .. which is 9 bytes back
#if defined(WITH_JIT_TUNING)
    bl      dvmBumpNormal
#endif
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ Is there a translation?
    str     r0, [r10, #offThread_inJitCodeCache]    @ set the inJitCodeCache flag
    cmp     r0,#0
    beq     toInterpreter               @ go if not, otherwise do chain
    mov     r1,rINST
    bl      dvmJitChain                 @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0,#0                       @ successful chain?
    bxne    r0                          @ continue native execution
    b       toInterpreter               @ didn't chain - resume with interpreter

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
#if defined(WITH_JIT_TUNING)
    bl      dvmBumpNoChain
#endif
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ Is there a translation?
    str     r0, [r10, #offThread_inJitCodeCache]    @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0,#0
    bxne    r0                          @ continue native execution if so
#endif

/*
 * No translation, restore interpreter regs and start interpreting.
 * rGLUE & rFP were preserved in the translated code, and rPC has
 * already been restored by the time we get here.  We'll need to set
 * up rIBASE & rINST, and load the address of the JitTable into r0.
 */
toInterpreter:
    EXPORT_PC()
    adrl    rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_JIT_PROF_TABLE(r0)
    @ NOTE: intended fallthrough
/*
 * Common code to update potential trace start counter, and initiate
 * a trace-build if appropriate.  On entry, rPC should point to the
 * next instruction to execute, and rINST should be already loaded with
 * the next opcode word, and r0 holds a pointer to the jit profile
 * table (pJitProfTable).
 */
common_testUpdateProfile:
    cmp     r0,#0
    GET_INST_OPCODE(ip)
    GOTO_OPCODE_IFEQ(ip)        @ if not profiling, fallthrough otherwise */

common_updateProfile:
    eor     r3,rPC,rPC,lsr #12  @ cheap, but fast hash function
    lsl     r3,r3,#(32 - JIT_PROF_SIZE_LOG_2)   @ shift out excess bits
    ldrb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)]  @ get counter
    GET_INST_OPCODE(ip)
    subs    r1,r1,#1            @ decrement counter
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)]  @ and store it
    GOTO_OPCODE_IFNE(ip)        @ if not threshold, fallthrough otherwise */

/*
 * Here, we switch to the debug interpreter to request
 * trace selection.  First, though, check to see if there
 * is already a native translation in place (and, if so,
 * jump to it now).
 */
    GET_JIT_THRESHOLD(r1)
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)]  @ reset counter
    EXPORT_PC()
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
    str     r0, [r10, #offThread_inJitCodeCache]    @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0,#0
#if !defined(WITH_SELF_VERIFICATION)
    bxne    r0                          @ jump to the translation
    mov     r2,#kJitTSelectRequest      @ ask for trace selection
    @ fall-through to common_selectTrace
#else
    moveq   r2,#kJitTSelectRequest      @ ask for trace selection
    beq     common_selectTrace
    /*
     * At this point, we have a target translation.  However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    mov     r10, r0                     @ save target
    bl      dvmCompilerGetInterpretTemplate
    cmp     r0, r10                     @ special case?
    bne     jitSVShadowRunStart         @ set up self verification shadow space
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
    /* no return */
#endif

/*
 * On entry:
 *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
 */
common_selectTrace:
    str     r2,[rGLUE,#offGlue_jitState]
    mov     r2,#kInterpEntryInstr       @ normal entry reason
    str     r2,[rGLUE,#offGlue_entryPoint]
    mov     r1,#1                       @ set changeInterp
    b       common_gotoBail

#if defined(WITH_SELF_VERIFICATION)
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
 * On entry:
 *    rPC, rFP, rGLUE: the values that they should contain
 *    r10: the address of the target translation.
 */
jitSVShadowRunStart:
    mov     r0,rPC                      @ r0<- program counter
    mov     r1,rFP                      @ r1<- frame pointer
    mov     r2,rGLUE                    @ r2<- InterpState pointer
    mov     r3,r10                      @ r3<- target translation
    bl      dvmSelfVerificationSaveState    @ save registers to shadow space
    ldr     rFP,[r0,#offShadowSpace_shadowFP]   @ rFP<- fp in shadow space
    add     rGLUE,r0,#offShadowSpace_interpState    @ rGLUE<- rGLUE in shadow space
    bx      r10                         @ jump to the translation

/*
 * Restore PC, registers, and interpState to original values
 * before jumping back to the interpreter.
 */
jitSVShadowRunEnd:
    mov     r1,rFP                      @ pass ending fp
    bl      dvmSelfVerificationRestoreState @ restore pc and fp values
    ldr     rPC,[r0,#offShadowSpace_startPC]    @ restore PC
    ldr     rFP,[r0,#offShadowSpace_fp] @ restore FP
    ldr     rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
    ldr     r1,[r0,#offShadowSpace_svState] @ get self verification state
    cmp     r1,#0                       @ check for punt condition
    beq     1f
    mov     r2,#kJitSelfVerification    @ ask for self verification
    str     r2,[rGLUE,#offGlue_jitState]
    mov     r2,#kInterpEntryInstr       @ normal entry reason
    str     r2,[rGLUE,#offGlue_entryPoint]
    mov     r1,#1                       @ set changeInterp
    b       common_gotoBail

1:  @ exit to interpreter without check
    EXPORT_PC()
    adrl    rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#endif

#endif

/*
 * Common code when a backward branch is taken.
 *
 * TODO: we could avoid a branch by just setting r0 and falling through
 * into the common_periodicChecks code, and having a test on r0 at the
 * end determine if we should return to the caller or update & branch to
 * the next instr.
 *
 * On entry:
 *  r9 is PC adjustment *in bytes*
 */
common_backwardBranch:
    mov     r0, #kInterpEntryInstr
    bl      common_periodicChecks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/*
 * Need to see if the thread needs to be suspended or debugger/profiler
 * activity has begun.  If so, we suspend the thread or side-exit to
 * the debug interpreter as appropriate.
 *
 * The common case is no activity on any of these, so we want to figure
 * that out quickly.  If something is up, we can then sort out what.
 *
 * We want to be fast if the VM was built without debugger or profiler
 * support, but we also need to recognize that the system is usually
 * shipped with both of these enabled.
 *
 * TODO: reduce this so we're just checking a single location.
 *
 * On entry:
 *  r0 is reentry type, e.g. kInterpEntryInstr (for debugger/profiling)
 *  r9 is trampoline PC adjustment *in bytes*
 */
common_periodicChecks:
    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount

#if defined(WITH_DEBUGGER)
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
#endif
#if defined(WITH_PROFILER)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
#endif

    ldr     ip, [r3]                    @ ip<- suspendCount (int)

#if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
    orrne   ip, ip, r1                  @ ip<- suspendCount | debuggerActive
    orrs    ip, ip, r2                  @ ip<- suspend|debugger|profiler; set Z
#elif defined(WITH_DEBUGGER)
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
    orrsne  ip, ip, r1                  @ yes, ip<- suspend | debugger; set Z
    @ (if not enabled, Z was set by test for r1==0, which is what we want)
#elif defined (WITH_PROFILER)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
    orrs    ip, ip, r2                  @ ip<- suspendCount | activeProfilers
#else
    cmp     ip, #0                      @ not ORing anything in; set Z
#endif

    bxeq    lr                          @ all zero, return

    /*
     * One or more interesting events have happened.  Figure out what.
     *
     * If debugging or profiling are compiled in, we need to disambiguate.
     *
     * r0 still holds the reentry type.
     */
#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
    ldr     ip, [r3]                    @ ip<- suspendCount (int)
    cmp     ip, #0                      @ want suspend?
    beq     1f                          @ no, must be debugger/profiler
#endif

    stmfd   sp!, {r0, lr}               @ preserve r0 and lr
#if defined(WITH_JIT)
    /*
     * Refresh the Jit's cached copy of profile table pointer.  This pointer
     * doubles as the Jit's on/off switch.
     */
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable]    @ r3<-&gDvmJit.pJitProfTable
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r3, [r3]                    @ r3 <- pJitProfTable
    EXPORT_PC()                         @ need for precise GC
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
#else
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ need for precise GC
#endif
    bl      dvmCheckSuspendPending      @ do full check, suspend if necessary
    ldmfd   sp!, {r0, lr}               @ restore r0 and lr

#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
    /*
     * Reload the debugger/profiler enable flags.  We're checking to see
     * if either of these got set while we were suspended.
     *
     * We can't really avoid the #ifdefs here, because the fields don't
     * exist when the feature is disabled.
     */
#if defined(WITH_DEBUGGER)
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
#else
    mov     r1, #0
#endif
#if defined(WITH_PROFILER)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
#else
    mov     r2, #0
#endif

    orrs    r1, r1, r2                  @ either flag set?
    beq     2f                          @ no, plain return

1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
    add     rPC, rPC, r9                @ update rPC
    mov     r1, #1                      @ "want switch" = true
    b       common_gotoBail             @ side exit

#endif /*WITH_DEBUGGER || WITH_PROFILER*/

2:
    bx      lr                          @ nothing to do, return


/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 *
 * State registers will be saved to the "glue" area before bailing.
 *
 * On entry:
 *  r1 is "bool changeInterp", indicating if we want to switch to the
 *     other interpreter or just bail all the way out
 */
common_gotoBail:
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r0, rGLUE                   @ r0<- glue ptr
    b       dvmMterpStdBail             @ call(glue, changeInterp)

    @add     r1, r1, #1                 @ using (boolean+1)
    @add     r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf
    @bl      _longjmp                   @ does not return
    @bl      common_abort


/*
 * Common code for method invocation with range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodRange:
.LinvokeNewRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC

    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
    @ (very few methods have > 10 args; could unroll for common cases)
    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
1:  ldr     r1, [r3], #4                @ val = *fp++
    subs    r2, r2, #1                  @ count--
    str     r1, [r10], #4               @ *outs++ = val
    bne     1b                          @ ...while count != 0
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    b       .LinvokeArgsDone
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    beq     .LinvokeArgsDone            @ no args, skip the copy loop

    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
.LinvokeNonRange:
    @ Computed goto into the 5..1 handlers below: with N args we jump past
    @ (5-N) handlers.  Each handler MUST be exactly 4 instructions (16 bytes)
    @ for the "lsl #4" scaling to land on the right label; hence the nops.
    rsb     r2, r2, #5                  @ r2<- 5-r2
    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
5:  and     ip, rINST, #0x0f00          @ isolate A
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vA (shift right 8, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vA
4:  and     ip, r1, #0xf000             @ isolate G
    ldr     r2, [rFP, ip, lsr #10]      @ r2<- vG (shift right 12, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vG
3:  and     ip, r1, #0x0f00             @ isolate F
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vF
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vF
2:  and     ip, r1, #0x00f0             @ isolate E
    ldr     r2, [rFP, ip, lsr #2]       @ r2<- vE
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vE
1:  and     ip, r1, #0x000f             @ isolate D
    ldr     r2, [rFP, ip, lsl #2]       @ r2<- vD
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]      @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]   @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
@    bl      common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                      @ bottom < interpStackEnd?
    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
    blo     .LstackOverflow             @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE             @ native method?
    bne     .LinvokeNative

    /*
    (debug frame-dump code, disabled)
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                        @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                         @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    cmp     r0,#0                           @ JIT profiling on?
    bne     common_updateProfile
    GOTO_OPCODE(ip)                         @ jump to next instruction
#else
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    GOTO_OPCODE(ip)                         @ jump to next instruction
#endif

.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
    mov     r9, r3                      @ r9<- glue->self (preserve)

    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFp (points to args)
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type   dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

    @mov     lr, pc                      @ set return addr
    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    LDR_PC_LR "[r2, #offMethod_nativeFunc]" @ call nativeFunc(newFp, &retval, ...)

#if defined(WITH_JIT)
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
    ldr     r1, [r9, #offThread_exception] @ check for exception
#if defined(WITH_JIT)
    ldr     r3, [r3]                    @ r3 <- gDvmJit.pProfTable
#endif
    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
#if defined(WITH_JIT)
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
#endif
    bne     common_exceptionThrown      @ no, handle exception

    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LstackOverflow:    @ r0=methodToCall
    mov     r1, r0                      @ r1<- methodToCall
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
    bl      dvmHandleStackOverflow      @ throw StackOverflowError
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
#endif


    /*
     * Common code for method invocation, calling through "glue code".
     *
     * TODO: now that we have range and non-range invoke handlers, this
     *       needs to be split into two.  Maybe just create entry points
     *       that set r9 and jump here?
     *
     * On entry:
     *  r0 is "Method* methodToCall", the method we're trying to call
     *  r9 is "bool methodCallRange", indicating if this is a /range variant
     */
     .if    0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rGLUE                   @ A0<- glue
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
    .endif



/*
 * Common code for handling a return instruction.
 *
 * This does not return.
10131 */ 10132common_returnFromMethod: 10133.LreturnNew: 10134 mov r0, #kInterpEntryReturn 10135 mov r9, #0 10136 bl common_periodicChecks 10137 10138 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) 10139 ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame 10140 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc 10141 ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)] 10142 @ r2<- method we're returning to 10143 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10144 cmp r2, #0 @ is this a break frame? 10145 ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz 10146 mov r1, #0 @ "want switch" = false 10147 beq common_gotoBail @ break frame, bail out completely 10148 10149 PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST 10150 str r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method 10151 ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex 10152 str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp 10153#if defined(WITH_JIT) 10154 ldr r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr 10155 GET_JIT_PROF_TABLE(r0) 10156 mov rPC, r9 @ publish new rPC 10157 str r1, [rGLUE, #offGlue_methodClassDex] 10158 str r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land 10159 cmp r10, #0 @ caller is compiled code 10160 blxne r10 10161 GET_INST_OPCODE(ip) @ extract opcode from rINST 10162 cmp r0,#0 10163 bne common_updateProfile 10164 GOTO_OPCODE(ip) @ jump to next instruction 10165#else 10166 GET_INST_OPCODE(ip) @ extract opcode from rINST 10167 mov rPC, r9 @ publish new rPC 10168 str r1, [rGLUE, #offGlue_methodClassDex] 10169 GOTO_OPCODE(ip) @ jump to next instruction 10170#endif 10171 10172 /* 10173 * Return handling, calls through "glue code". 
     */
     .if    0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif


/*
 * Somebody has thrown an exception.  Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
    .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow      @ reentry type for periodic checks
    mov     r9, #0                      @ no PC adjustment
    bl      common_periodicChecks

    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10                     @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    mov     r3, #0                      @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false (don't just scan)
    str     ip, [sp, #-4]!              @ *--sp = &fp (5th arg, on stack)
    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
    mov     r0, r10                     @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ save relPc result in rFP
    mov     r0, r10                     @ r0<- self
    mov     r1, r9                      @ r1<- exception
    bl      dvmCleanupStackOverflow     @ call(self, exception)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack
    bmi     .LnotCaughtLocally          @ negative relPc: no local handler

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]  @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]  @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1         @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, r10                     @ if yes: r0<- self
    movne   r1, r9                      @ if yes: r1<- exception
    blne    dvmCleanupStackOverflow     @ if yes: call(self, exception)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1                  @ offset in code units
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!              @ push line number (varargs)
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!              @ push source file (varargs)
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
#endif
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    mov     r1, #0                      @ "want switch" = false
    b       common_gotoBail             @ bail out


    /*
     * Exception handling, calls through "glue code".
     */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Invalid array index.
10328 */ 10329common_errArrayIndex: 10330 EXPORT_PC() 10331 ldr r0, strArrayIndexException 10332 mov r1, #0 10333 bl dvmThrowException 10334 b common_exceptionThrown 10335 10336/* 10337 * Invalid array value. 10338 */ 10339common_errArrayStore: 10340 EXPORT_PC() 10341 ldr r0, strArrayStoreException 10342 mov r1, #0 10343 bl dvmThrowException 10344 b common_exceptionThrown 10345 10346/* 10347 * Integer divide or mod by zero. 10348 */ 10349common_errDivideByZero: 10350 EXPORT_PC() 10351 ldr r0, strArithmeticException 10352 ldr r1, strDivideByZero 10353 bl dvmThrowException 10354 b common_exceptionThrown 10355 10356/* 10357 * Attempt to allocate an array with a negative size. 10358 */ 10359common_errNegativeArraySize: 10360 EXPORT_PC() 10361 ldr r0, strNegativeArraySizeException 10362 mov r1, #0 10363 bl dvmThrowException 10364 b common_exceptionThrown 10365 10366/* 10367 * Invocation of a non-existent method. 10368 */ 10369common_errNoSuchMethod: 10370 EXPORT_PC() 10371 ldr r0, strNoSuchMethodError 10372 mov r1, #0 10373 bl dvmThrowException 10374 b common_exceptionThrown 10375 10376/* 10377 * We encountered a null object when we weren't expecting one. We 10378 * export the PC, throw a NullPointerException, and goto the exception 10379 * processing code. 10380 */ 10381common_errNullObject: 10382 EXPORT_PC() 10383 ldr r0, strNullPointerException 10384 mov r1, #0 10385 bl dvmThrowException 10386 b common_exceptionThrown 10387 10388/* 10389 * For debugging, cause an immediate fault. The source address will 10390 * be in lr (use a bl instruction to jump here). 10391 */ 10392common_abort: 10393 ldr pc, .LdeadFood 10394.LdeadFood: 10395 .word 0xdeadf00d 10396 10397/* 10398 * Spit out a "we were here", preserving all registers. (The attempt 10399 * to save ip won't work, but we need to save an even number of 10400 * registers for EABI 64-bit stack alignment.) 
10401 */ 10402 .macro SQUEAK num 10403common_squeak\num: 10404 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10405 ldr r0, strSqueak 10406 mov r1, #\num 10407 bl printf 10408 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10409 bx lr 10410 .endm 10411 10412 SQUEAK 0 10413 SQUEAK 1 10414 SQUEAK 2 10415 SQUEAK 3 10416 SQUEAK 4 10417 SQUEAK 5 10418 10419/* 10420 * Spit out the number in r0, preserving registers. 10421 */ 10422common_printNum: 10423 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10424 mov r1, r0 10425 ldr r0, strSqueak 10426 bl printf 10427 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10428 bx lr 10429 10430/* 10431 * Print a newline, preserving registers. 10432 */ 10433common_printNewline: 10434 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10435 ldr r0, strNewline 10436 bl printf 10437 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10438 bx lr 10439 10440 /* 10441 * Print the 32-bit quantity in r0 as a hex value, preserving registers. 10442 */ 10443common_printHex: 10444 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10445 mov r1, r0 10446 ldr r0, strPrintHex 10447 bl printf 10448 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10449 bx lr 10450 10451/* 10452 * Print the 64-bit quantity in r0-r1, preserving registers. 10453 */ 10454common_printLong: 10455 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10456 mov r3, r1 10457 mov r2, r0 10458 ldr r0, strPrintLong 10459 bl printf 10460 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10461 bx lr 10462 10463/* 10464 * Print full method info. Pass the Method* in r0. Preserves regs. 10465 */ 10466common_printMethod: 10467 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10468 bl dvmMterpPrintMethod 10469 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10470 bx lr 10471 10472/* 10473 * Call a C helper function that dumps regs and possibly some 10474 * additional info. Requires the C function to be compiled in. 
10475 */ 10476 .if 0 10477common_dumpRegs: 10478 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10479 bl dvmMterpDumpArmRegs 10480 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10481 bx lr 10482 .endif 10483 10484#if 0 10485/* 10486 * Experiment on VFP mode. 10487 * 10488 * uint32_t setFPSCR(uint32_t val, uint32_t mask) 10489 * 10490 * Updates the bits specified by "mask", setting them to the values in "val". 10491 */ 10492setFPSCR: 10493 and r0, r0, r1 @ make sure no stray bits are set 10494 fmrx r2, fpscr @ get VFP reg 10495 mvn r1, r1 @ bit-invert mask 10496 and r2, r2, r1 @ clear masked bits 10497 orr r2, r2, r0 @ set specified bits 10498 fmxr fpscr, r2 @ set VFP reg 10499 mov r0, r2 @ return new value 10500 bx lr 10501 10502 .align 2 10503 .global dvmConfigureFP 10504 .type dvmConfigureFP, %function 10505dvmConfigureFP: 10506 stmfd sp!, {ip, lr} 10507 /* 0x03000000 sets DN/FZ */ 10508 /* 0x00009f00 clears the six exception enable flags */ 10509 bl common_squeak0 10510 mov r0, #0x03000000 @ r0<- 0x03000000 10511 add r1, r0, #0x9f00 @ r1<- 0x03009f00 10512 bl setFPSCR 10513 ldmfd sp!, {ip, pc} 10514#endif 10515 10516 10517/* 10518 * String references, must be close to the code that uses them. 
 */
    @ Literal pool: each strXxx holds the address of the corresponding
    @ .LstrXxx string in .rodata, loadable with a PC-relative ldr.
    .align  2
strArithmeticException:
    .word   .LstrArithmeticException
strArrayIndexException:
    .word   .LstrArrayIndexException
strArrayStoreException:
    .word   .LstrArrayStoreException
strDivideByZero:
    .word   .LstrDivideByZero
strNegativeArraySizeException:
    .word   .LstrNegativeArraySizeException
strNoSuchMethodError:
    .word   .LstrNoSuchMethodError
strNullPointerException:
    .word   .LstrNullPointerException

strLogTag:
    .word   .LstrLogTag
strExceptionNotCaughtLocally:
    .word   .LstrExceptionNotCaughtLocally

strNewline:
    .word   .LstrNewline
strSqueak:
    .word   .LstrSqueak
strPrintHex:
    .word   .LstrPrintHex
strPrintLong:
    .word   .LstrPrintLong

/*
 * Zero-terminated ASCII string data.
 *
 * On ARM we have two choices: do like gcc does, and LDR from a .word
 * with the address, or use an ADR pseudo-op to get the address
 * directly.  ADR saves 4 bytes and an indirection, but it's using a
 * PC-relative addressing mode and hence has a limited range, which
 * makes it not work well with mergeable string sections.
 */
    .section .rodata.str1.4,"aMS",%progbits,1

.LstrBadEntryPoint:
    .asciz  "Bad entry point %d\n"
.LstrArithmeticException:
    .asciz  "Ljava/lang/ArithmeticException;"
.LstrArrayIndexException:
    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
.LstrArrayStoreException:
    .asciz  "Ljava/lang/ArrayStoreException;"
.LstrClassCastException:
    .asciz  "Ljava/lang/ClassCastException;"
.LstrDivideByZero:
    .asciz  "divide by zero"
.LstrFilledNewArrayNotImpl:
    .asciz  "filled-new-array only implemented for objects and 'int'"
.LstrInternalError:
    .asciz  "Ljava/lang/InternalError;"
.LstrInstantiationError:
    .asciz  "Ljava/lang/InstantiationError;"
.LstrNegativeArraySizeException:
    .asciz  "Ljava/lang/NegativeArraySizeException;"
.LstrNoSuchMethodError:
    .asciz  "Ljava/lang/NoSuchMethodError;"
.LstrNullPointerException:
    .asciz  "Ljava/lang/NullPointerException;"

.LstrLogTag:
    .asciz  "mterp"
.LstrExceptionNotCaughtLocally:
    .asciz  "Exception %s from %s:%d not caught locally\n"

.LstrNewline:
    .asciz  "\n"
.LstrSqueak:
    .asciz  "<%d>"
.LstrPrintHex:
    .asciz  "<0x%x>"
.LstrPrintLong:
    .asciz  "<%lld>"