/* InterpAsm-armv5te-vfp.S revision 0890e5bf0b2a502ca1030e9773fabc16ef1b5981 */
1/* 2 * This file was generated automatically by gen-mterp.py for 'armv5te-vfp'. 3 * 4 * --> DO NOT EDIT <-- 5 */ 6 7/* File: armv5te/header.S */ 8/* 9 * Copyright (C) 2008 The Android Open Source Project 10 * 11 * Licensed under the Apache License, Version 2.0 (the "License"); 12 * you may not use this file except in compliance with the License. 13 * You may obtain a copy of the License at 14 * 15 * http://www.apache.org/licenses/LICENSE-2.0 16 * 17 * Unless required by applicable law or agreed to in writing, software 18 * distributed under the License is distributed on an "AS IS" BASIS, 19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 20 * See the License for the specific language governing permissions and 21 * limitations under the License. 22 */ 23 24/* 25 * ARMv5 definitions and declarations. 26 */ 27 28/* 29ARM EABI general notes: 30 31r0-r3 hold first 4 args to a method; they are not preserved across method calls 32r4-r8 are available for general use 33r9 is given special treatment in some situations, but not for us 34r10 (sl) seems to be generally available 35r11 (fp) is used by gcc (unless -fomit-frame-pointer is set) 36r12 (ip) is scratch -- not preserved across method calls 37r13 (sp) should be managed carefully in case a signal arrives 38r14 (lr) must be preserved 39r15 (pc) can be tinkered with directly 40 41r0 holds returns of <= 4 bytes 42r0-r1 hold returns of 8 bytes, low word in r0 43 44Callee must save/restore r4+ (except r12) if it modifies them. If VFP 45is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved, 46s0-s15 (d0-d7, q0-a3) do not need to be. 47 48Stack is "full descending". Only the arguments that don't fit in the first 4 49registers are placed on the stack. "sp" points at the first stacked argument 50(i.e. the 5th arg). 51 52VFP: single-precision results in s0, double-precision results in d0. 
53 54In the EABI, "sp" must be 64-bit aligned on entry to a function, and any 5564-bit quantities (long long, double) must be 64-bit aligned. 56*/ 57 58/* 59Mterp and ARM notes: 60 61The following registers have fixed assignments: 62 63 reg nick purpose 64 r4 rPC interpreted program counter, used for fetching instructions 65 r5 rFP interpreted frame pointer, used for accessing locals and args 66 r6 rGLUE MterpGlue pointer 67 r7 rINST first 16-bit code unit of current instruction 68 r8 rIBASE interpreted instruction base pointer, used for computed goto 69 70Macros are provided for common operations. Each macro MUST emit only 71one instruction to make instruction-counting easier. They MUST NOT alter 72unspecified registers or condition codes. 73*/ 74 75/* single-purpose registers, given names for clarity */ 76#define rPC r4 77#define rFP r5 78#define rGLUE r6 79#define rINST r7 80#define rIBASE r8 81 82/* save/restore the PC and/or FP from the glue struct */ 83#define LOAD_PC_FROM_GLUE() ldr rPC, [rGLUE, #offGlue_pc] 84#define SAVE_PC_TO_GLUE() str rPC, [rGLUE, #offGlue_pc] 85#define LOAD_FP_FROM_GLUE() ldr rFP, [rGLUE, #offGlue_fp] 86#define SAVE_FP_TO_GLUE() str rFP, [rGLUE, #offGlue_fp] 87#define LOAD_PC_FP_FROM_GLUE() ldmia rGLUE, {rPC, rFP} 88#define SAVE_PC_FP_TO_GLUE() stmia rGLUE, {rPC, rFP} 89 90/* 91 * "export" the PC to the stack frame, f/b/o future exception objects. Must 92 * be done *before* something calls dvmThrowException. 93 * 94 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e. 95 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc) 96 * 97 * It's okay to do this more than once. 98 */ 99#define EXPORT_PC() \ 100 str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)] 101 102/* 103 * Given a frame pointer, find the stack save area. 104 * 105 * In C this is "((StackSaveArea*)(_fp) -1)". 
106 */ 107#define SAVEAREA_FROM_FP(_reg, _fpreg) \ 108 sub _reg, _fpreg, #sizeofStackSaveArea 109 110/* 111 * Fetch the next instruction from rPC into rINST. Does not advance rPC. 112 */ 113#define FETCH_INST() ldrh rINST, [rPC] 114 115/* 116 * Fetch the next instruction from the specified offset. Advances rPC 117 * to point to the next instruction. "_count" is in 16-bit code units. 118 * 119 * Because of the limited size of immediate constants on ARM, this is only 120 * suitable for small forward movements (i.e. don't try to implement "goto" 121 * with this). 122 * 123 * This must come AFTER anything that can throw an exception, or the 124 * exception catch may miss. (This also implies that it must come after 125 * EXPORT_PC().) 126 */ 127#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]! 128 129/* 130 * The operation performed here is similar to FETCH_ADVANCE_INST, except the 131 * src and dest registers are parameterized (not hard-wired to rPC and rINST). 132 */ 133#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \ 134 ldrh _dreg, [_sreg, #(_count*2)]! 135 136/* 137 * Fetch the next instruction from an offset specified by _reg. Updates 138 * rPC to point to the next instruction. "_reg" must specify the distance 139 * in bytes, *not* 16-bit code units, and may be a signed value. 140 * 141 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the 142 * bits that hold the shift distance are used for the half/byte/sign flags. 143 * In some cases we can pre-double _reg for free, so we require a byte offset 144 * here. 145 */ 146#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]! 147 148/* 149 * Fetch a half-word code unit from an offset past the current PC. The 150 * "_count" value is in 16-bit code units. Does not advance rPC. 151 * 152 * The "_S" variant works the same but treats the value as signed. 
153 */ 154#define FETCH(_reg, _count) ldrh _reg, [rPC, #(_count*2)] 155#define FETCH_S(_reg, _count) ldrsh _reg, [rPC, #(_count*2)] 156 157/* 158 * Fetch one byte from an offset past the current PC. Pass in the same 159 * "_count" as you would for FETCH, and an additional 0/1 indicating which 160 * byte of the halfword you want (lo/hi). 161 */ 162#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #(_count*2+_byte)] 163 164/* 165 * Put the instruction's opcode field into the specified register. 166 */ 167#define GET_INST_OPCODE(_reg) and _reg, rINST, #255 168 169/* 170 * Put the prefetched instruction's opcode field into the specified register. 171 */ 172#define GET_PREFETCHED_OPCODE(_oreg, _ireg) and _oreg, _ireg, #255 173 174/* 175 * Begin executing the opcode in _reg. Because this only jumps within the 176 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork. 177 */ 178#define GOTO_OPCODE(_reg) add pc, rIBASE, _reg, lsl #6 179#define GOTO_OPCODE_IFEQ(_reg) addeq pc, rIBASE, _reg, lsl #6 180#define GOTO_OPCODE_IFNE(_reg) addne pc, rIBASE, _reg, lsl #6 181 182/* 183 * Get/set the 32-bit value from a Dalvik register. 184 */ 185#define GET_VREG(_reg, _vreg) ldr _reg, [rFP, _vreg, lsl #2] 186#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2] 187 188#if defined(WITH_JIT) 189#define GET_JIT_PROF_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitProfTable] 190#define GET_JIT_THRESHOLD(_reg) ldr _reg,[rGLUE,#offGlue_jitThreshold] 191#endif 192 193/* 194 * Convert a virtual register index into an address. 195 */ 196#define VREG_INDEX_TO_ADDR(_reg, _vreg) \ 197 add _reg, rFP, _vreg, lsl #2 198 199/* 200 * This is a #include, not a %include, because we want the C pre-processor 201 * to expand the macros into assembler assignment statements. 
202 */ 203#include "../common/asm-constants.h" 204 205#if defined(WITH_JIT) 206#include "../common/jit-config.h" 207#endif 208 209/* File: armv5te/platform.S */ 210/* 211 * =========================================================================== 212 * CPU-version-specific defines 213 * =========================================================================== 214 */ 215 216/* 217 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5. Essentially a 218 * one-way branch. 219 * 220 * May modify IP. Does not modify LR. 221 */ 222.macro LDR_PC source 223 ldr pc, \source 224.endm 225 226/* 227 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5. 228 * Jump to subroutine. 229 * 230 * May modify IP and LR. 231 */ 232.macro LDR_PC_LR source 233 mov lr, pc 234 ldr pc, \source 235.endm 236 237/* 238 * Macro for "LDMFD SP!, {...regs...,PC}". 239 * 240 * May modify IP and LR. 241 */ 242.macro LDMFD_PC regs 243 ldmfd sp!, {\regs,pc} 244.endm 245 246/* 247 * Macro for data memory barrier; not meaningful pre-ARMv6K. 248 */ 249.macro SMP_DMB 250.endm 251 252/* File: armv5te/entry.S */ 253/* 254 * Copyright (C) 2008 The Android Open Source Project 255 * 256 * Licensed under the Apache License, Version 2.0 (the "License"); 257 * you may not use this file except in compliance with the License. 258 * You may obtain a copy of the License at 259 * 260 * http://www.apache.org/licenses/LICENSE-2.0 261 * 262 * Unless required by applicable law or agreed to in writing, software 263 * distributed under the License is distributed on an "AS IS" BASIS, 264 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 265 * See the License for the specific language governing permissions and 266 * limitations under the License. 267 */ 268/* 269 * Interpreter entry point. 
270 */ 271 272/* 273 * We don't have formal stack frames, so gdb scans upward in the code 274 * to find the start of the function (a label with the %function type), 275 * and then looks at the next few instructions to figure out what 276 * got pushed onto the stack. From this it figures out how to restore 277 * the registers, including PC, for the previous stack frame. If gdb 278 * sees a non-function label, it stops scanning, so either we need to 279 * have nothing but assembler-local labels between the entry point and 280 * the break, or we need to fake it out. 281 * 282 * When this is defined, we add some stuff to make gdb less confused. 283 */ 284#define ASSIST_DEBUGGER 1 285 286 .text 287 .align 2 288 .global dvmMterpStdRun 289 .type dvmMterpStdRun, %function 290 291/* 292 * On entry: 293 * r0 MterpGlue* glue 294 * 295 * This function returns a boolean "changeInterp" value. The return comes 296 * via a call to dvmMterpStdBail(). 297 */ 298dvmMterpStdRun: 299#define MTERP_ENTRY1 \ 300 .save {r4-r10,fp,lr}; \ 301 stmfd sp!, {r4-r10,fp,lr} @ save 9 regs 302#define MTERP_ENTRY2 \ 303 .pad #4; \ 304 sub sp, sp, #4 @ align 64 305 306 .fnstart 307 MTERP_ENTRY1 308 MTERP_ENTRY2 309 310 /* save stack pointer, add magic word for debuggerd */ 311 str sp, [r0, #offGlue_bailPtr] @ save SP for eventual return 312 313 /* set up "named" registers, figure out entry point */ 314 mov rGLUE, r0 @ set rGLUE 315 ldr r1, [r0, #offGlue_entryPoint] @ enum is 4 bytes in aapcs-EABI 316 LOAD_PC_FP_FROM_GLUE() @ load rPC and rFP from "glue" 317 adr rIBASE, dvmAsmInstructionStart @ set rIBASE 318 cmp r1, #kInterpEntryInstr @ usual case? 
319 bne .Lnot_instr @ no, handle it 320 321#if defined(WITH_JIT) 322.LentryInstr: 323 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 324 /* Entry is always a possible trace start */ 325 GET_JIT_PROF_TABLE(r0) 326 FETCH_INST() 327 mov r1, #0 @ prepare the value for the new state 328 str r1, [r10, #offThread_inJitCodeCache] @ back to the interp land 329 cmp r0,#0 330 bne common_updateProfile 331 GET_INST_OPCODE(ip) 332 GOTO_OPCODE(ip) 333#else 334 /* start executing the instruction at rPC */ 335 FETCH_INST() @ load rINST from rPC 336 GET_INST_OPCODE(ip) @ extract opcode from rINST 337 GOTO_OPCODE(ip) @ jump to next instruction 338#endif 339 340.Lnot_instr: 341 cmp r1, #kInterpEntryReturn @ were we returning from a method? 342 beq common_returnFromMethod 343 344.Lnot_return: 345 cmp r1, #kInterpEntryThrow @ were we throwing an exception? 346 beq common_exceptionThrown 347 348#if defined(WITH_JIT) 349.Lnot_throw: 350 ldr r10,[rGLUE, #offGlue_jitResumeNPC] 351 ldr r2,[rGLUE, #offGlue_jitResumeDPC] 352 cmp r1, #kInterpEntryResume @ resuming after Jit single-step? 353 bne .Lbad_arg 354 cmp rPC,r2 355 bne .LentryInstr @ must have branched, don't resume 356#if defined(WITH_SELF_VERIFICATION) 357 @ glue->entryPoint will be set in dvmSelfVerificationSaveState 358 b jitSVShadowRunStart @ re-enter the translation after the 359 @ single-stepped instruction 360 @noreturn 361#endif 362 mov r1, #kInterpEntryInstr 363 str r1, [rGLUE, #offGlue_entryPoint] 364 bx r10 @ re-enter the translation 365#endif 366 367.Lbad_arg: 368 ldr r0, strBadEntryPoint 369 @ r1 holds value of entryPoint 370 bl printf 371 bl dvmAbort 372 .fnend 373 374 375 .global dvmMterpStdBail 376 .type dvmMterpStdBail, %function 377 378/* 379 * Restore the stack pointer and PC from the save point established on entry. 380 * This is essentially the same as a longjmp, but should be cheaper. The 381 * last instruction causes us to return to whoever called dvmMterpStdRun. 
382 * 383 * We pushed some registers on the stack in dvmMterpStdRun, then saved 384 * SP and LR. Here we restore SP, restore the registers, and then restore 385 * LR to PC. 386 * 387 * On entry: 388 * r0 MterpGlue* glue 389 * r1 bool changeInterp 390 */ 391dvmMterpStdBail: 392 ldr sp, [r0, #offGlue_bailPtr] @ sp<- saved SP 393 mov r0, r1 @ return the changeInterp value 394 add sp, sp, #4 @ un-align 64 395 LDMFD_PC "r4-r10,fp" @ restore 9 regs and return 396 397 398/* 399 * String references. 400 */ 401strBadEntryPoint: 402 .word .LstrBadEntryPoint 403 404 405 .global dvmAsmInstructionStart 406 .type dvmAsmInstructionStart, %function 407dvmAsmInstructionStart = .L_OP_NOP 408 .text 409 410/* ------------------------------ */ 411 .balign 64 412.L_OP_NOP: /* 0x00 */ 413/* File: armv5te/OP_NOP.S */ 414 FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST 415 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 416 GOTO_OPCODE(ip) @ execute it 417 418#ifdef ASSIST_DEBUGGER 419 /* insert fake function header to help gdb find the stack frame */ 420 .type dalvik_inst, %function 421dalvik_inst: 422 .fnstart 423 MTERP_ENTRY1 424 MTERP_ENTRY2 425 .fnend 426#endif 427 428/* ------------------------------ */ 429 .balign 64 430.L_OP_MOVE: /* 0x01 */ 431/* File: armv5te/OP_MOVE.S */ 432 /* for move, move-object, long-to-int */ 433 /* op vA, vB */ 434 mov r1, rINST, lsr #12 @ r1<- B from 15:12 435 mov r0, rINST, lsr #8 @ r0<- A from 11:8 436 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 437 GET_VREG(r2, r1) @ r2<- fp[B] 438 and r0, r0, #15 439 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 440 SET_VREG(r2, r0) @ fp[A]<- r2 441 GOTO_OPCODE(ip) @ execute next instruction 442 443/* ------------------------------ */ 444 .balign 64 445.L_OP_MOVE_FROM16: /* 0x02 */ 446/* File: armv5te/OP_MOVE_FROM16.S */ 447 /* for: move/from16, move-object/from16 */ 448 /* op vAA, vBBBB */ 449 FETCH(r1, 1) @ r1<- BBBB 450 mov r0, rINST, lsr #8 @ r0<- AA 451 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 
452 GET_VREG(r2, r1) @ r2<- fp[BBBB] 453 GET_INST_OPCODE(ip) @ extract opcode from rINST 454 SET_VREG(r2, r0) @ fp[AA]<- r2 455 GOTO_OPCODE(ip) @ jump to next instruction 456 457/* ------------------------------ */ 458 .balign 64 459.L_OP_MOVE_16: /* 0x03 */ 460/* File: armv5te/OP_MOVE_16.S */ 461 /* for: move/16, move-object/16 */ 462 /* op vAAAA, vBBBB */ 463 FETCH(r1, 2) @ r1<- BBBB 464 FETCH(r0, 1) @ r0<- AAAA 465 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 466 GET_VREG(r2, r1) @ r2<- fp[BBBB] 467 GET_INST_OPCODE(ip) @ extract opcode from rINST 468 SET_VREG(r2, r0) @ fp[AAAA]<- r2 469 GOTO_OPCODE(ip) @ jump to next instruction 470 471/* ------------------------------ */ 472 .balign 64 473.L_OP_MOVE_WIDE: /* 0x04 */ 474/* File: armv5te/OP_MOVE_WIDE.S */ 475 /* move-wide vA, vB */ 476 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 477 mov r2, rINST, lsr #8 @ r2<- A(+) 478 mov r3, rINST, lsr #12 @ r3<- B 479 and r2, r2, #15 480 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 481 add r2, rFP, r2, lsl #2 @ r2<- &fp[A] 482 ldmia r3, {r0-r1} @ r0/r1<- fp[B] 483 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 484 GET_INST_OPCODE(ip) @ extract opcode from rINST 485 stmia r2, {r0-r1} @ fp[A]<- r0/r1 486 GOTO_OPCODE(ip) @ jump to next instruction 487 488/* ------------------------------ */ 489 .balign 64 490.L_OP_MOVE_WIDE_FROM16: /* 0x05 */ 491/* File: armv5te/OP_MOVE_WIDE_FROM16.S */ 492 /* move-wide/from16 vAA, vBBBB */ 493 /* NOTE: regs can overlap, e.g. 
"move v6,v7" or "move v7,v6" */ 494 FETCH(r3, 1) @ r3<- BBBB 495 mov r2, rINST, lsr #8 @ r2<- AA 496 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 497 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 498 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 499 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 500 GET_INST_OPCODE(ip) @ extract opcode from rINST 501 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 502 GOTO_OPCODE(ip) @ jump to next instruction 503 504/* ------------------------------ */ 505 .balign 64 506.L_OP_MOVE_WIDE_16: /* 0x06 */ 507/* File: armv5te/OP_MOVE_WIDE_16.S */ 508 /* move-wide/16 vAAAA, vBBBB */ 509 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 510 FETCH(r3, 2) @ r3<- BBBB 511 FETCH(r2, 1) @ r2<- AAAA 512 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 513 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA] 514 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 515 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 516 GET_INST_OPCODE(ip) @ extract opcode from rINST 517 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1 518 GOTO_OPCODE(ip) @ jump to next instruction 519 520/* ------------------------------ */ 521 .balign 64 522.L_OP_MOVE_OBJECT: /* 0x07 */ 523/* File: armv5te/OP_MOVE_OBJECT.S */ 524/* File: armv5te/OP_MOVE.S */ 525 /* for move, move-object, long-to-int */ 526 /* op vA, vB */ 527 mov r1, rINST, lsr #12 @ r1<- B from 15:12 528 mov r0, rINST, lsr #8 @ r0<- A from 11:8 529 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 530 GET_VREG(r2, r1) @ r2<- fp[B] 531 and r0, r0, #15 532 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 533 SET_VREG(r2, r0) @ fp[A]<- r2 534 GOTO_OPCODE(ip) @ execute next instruction 535 536 537/* ------------------------------ */ 538 .balign 64 539.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */ 540/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */ 541/* File: armv5te/OP_MOVE_FROM16.S */ 542 /* for: move/from16, move-object/from16 */ 543 /* op vAA, vBBBB */ 544 FETCH(r1, 1) @ r1<- BBBB 545 mov r0, rINST, lsr #8 @ r0<- AA 546 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 547 GET_VREG(r2, r1) @ r2<- 
fp[BBBB] 548 GET_INST_OPCODE(ip) @ extract opcode from rINST 549 SET_VREG(r2, r0) @ fp[AA]<- r2 550 GOTO_OPCODE(ip) @ jump to next instruction 551 552 553/* ------------------------------ */ 554 .balign 64 555.L_OP_MOVE_OBJECT_16: /* 0x09 */ 556/* File: armv5te/OP_MOVE_OBJECT_16.S */ 557/* File: armv5te/OP_MOVE_16.S */ 558 /* for: move/16, move-object/16 */ 559 /* op vAAAA, vBBBB */ 560 FETCH(r1, 2) @ r1<- BBBB 561 FETCH(r0, 1) @ r0<- AAAA 562 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 563 GET_VREG(r2, r1) @ r2<- fp[BBBB] 564 GET_INST_OPCODE(ip) @ extract opcode from rINST 565 SET_VREG(r2, r0) @ fp[AAAA]<- r2 566 GOTO_OPCODE(ip) @ jump to next instruction 567 568 569/* ------------------------------ */ 570 .balign 64 571.L_OP_MOVE_RESULT: /* 0x0a */ 572/* File: armv5te/OP_MOVE_RESULT.S */ 573 /* for: move-result, move-result-object */ 574 /* op vAA */ 575 mov r2, rINST, lsr #8 @ r2<- AA 576 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 577 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 578 GET_INST_OPCODE(ip) @ extract opcode from rINST 579 SET_VREG(r0, r2) @ fp[AA]<- r0 580 GOTO_OPCODE(ip) @ jump to next instruction 581 582/* ------------------------------ */ 583 .balign 64 584.L_OP_MOVE_RESULT_WIDE: /* 0x0b */ 585/* File: armv5te/OP_MOVE_RESULT_WIDE.S */ 586 /* move-result-wide vAA */ 587 mov r2, rINST, lsr #8 @ r2<- AA 588 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 589 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 590 ldmia r3, {r0-r1} @ r0/r1<- retval.j 591 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 592 GET_INST_OPCODE(ip) @ extract opcode from rINST 593 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 594 GOTO_OPCODE(ip) @ jump to next instruction 595 596/* ------------------------------ */ 597 .balign 64 598.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */ 599/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */ 600/* File: armv5te/OP_MOVE_RESULT.S */ 601 /* for: move-result, move-result-object */ 602 /* op vAA */ 603 mov r2, rINST, lsr #8 @ r2<- AA 604 
FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 605 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 606 GET_INST_OPCODE(ip) @ extract opcode from rINST 607 SET_VREG(r0, r2) @ fp[AA]<- r0 608 GOTO_OPCODE(ip) @ jump to next instruction 609 610 611/* ------------------------------ */ 612 .balign 64 613.L_OP_MOVE_EXCEPTION: /* 0x0d */ 614/* File: armv5te/OP_MOVE_EXCEPTION.S */ 615 /* move-exception vAA */ 616 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 617 mov r2, rINST, lsr #8 @ r2<- AA 618 ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass 619 mov r1, #0 @ r1<- 0 620 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 621 SET_VREG(r3, r2) @ fp[AA]<- exception obj 622 GET_INST_OPCODE(ip) @ extract opcode from rINST 623 str r1, [r0, #offThread_exception] @ dvmClearException bypass 624 GOTO_OPCODE(ip) @ jump to next instruction 625 626/* ------------------------------ */ 627 .balign 64 628.L_OP_RETURN_VOID: /* 0x0e */ 629/* File: armv5te/OP_RETURN_VOID.S */ 630 b common_returnFromMethod 631 632/* ------------------------------ */ 633 .balign 64 634.L_OP_RETURN: /* 0x0f */ 635/* File: armv5te/OP_RETURN.S */ 636 /* 637 * Return a 32-bit value. Copies the return value into the "glue" 638 * structure, then jumps to the return handler. 639 * 640 * for: return, return-object 641 */ 642 /* op vAA */ 643 mov r2, rINST, lsr #8 @ r2<- AA 644 GET_VREG(r0, r2) @ r0<- vAA 645 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 646 b common_returnFromMethod 647 648/* ------------------------------ */ 649 .balign 64 650.L_OP_RETURN_WIDE: /* 0x10 */ 651/* File: armv5te/OP_RETURN_WIDE.S */ 652 /* 653 * Return a 64-bit value. Copies the return value into the "glue" 654 * structure, then jumps to the return handler. 
655 */ 656 /* return-wide vAA */ 657 mov r2, rINST, lsr #8 @ r2<- AA 658 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 659 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 660 ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1 661 stmia r3, {r0-r1} @ retval<- r0/r1 662 b common_returnFromMethod 663 664/* ------------------------------ */ 665 .balign 64 666.L_OP_RETURN_OBJECT: /* 0x11 */ 667/* File: armv5te/OP_RETURN_OBJECT.S */ 668/* File: armv5te/OP_RETURN.S */ 669 /* 670 * Return a 32-bit value. Copies the return value into the "glue" 671 * structure, then jumps to the return handler. 672 * 673 * for: return, return-object 674 */ 675 /* op vAA */ 676 mov r2, rINST, lsr #8 @ r2<- AA 677 GET_VREG(r0, r2) @ r0<- vAA 678 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 679 b common_returnFromMethod 680 681 682/* ------------------------------ */ 683 .balign 64 684.L_OP_CONST_4: /* 0x12 */ 685/* File: armv5te/OP_CONST_4.S */ 686 /* const/4 vA, #+B */ 687 mov r1, rINST, lsl #16 @ r1<- Bxxx0000 688 mov r0, rINST, lsr #8 @ r0<- A+ 689 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 690 mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended) 691 and r0, r0, #15 692 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 693 SET_VREG(r1, r0) @ fp[A]<- r1 694 GOTO_OPCODE(ip) @ execute next instruction 695 696/* ------------------------------ */ 697 .balign 64 698.L_OP_CONST_16: /* 0x13 */ 699/* File: armv5te/OP_CONST_16.S */ 700 /* const/16 vAA, #+BBBB */ 701 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 702 mov r3, rINST, lsr #8 @ r3<- AA 703 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 704 SET_VREG(r0, r3) @ vAA<- r0 705 GET_INST_OPCODE(ip) @ extract opcode from rINST 706 GOTO_OPCODE(ip) @ jump to next instruction 707 708/* ------------------------------ */ 709 .balign 64 710.L_OP_CONST: /* 0x14 */ 711/* File: armv5te/OP_CONST.S */ 712 /* const vAA, #+BBBBbbbb */ 713 mov r3, rINST, lsr #8 @ r3<- AA 714 FETCH(r0, 1) @ r0<- bbbb (low) 715 FETCH(r1, 2) @ r1<- BBBB (high) 716 FETCH_ADVANCE_INST(3) @ 
advance rPC, load rINST 717 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 718 GET_INST_OPCODE(ip) @ extract opcode from rINST 719 SET_VREG(r0, r3) @ vAA<- r0 720 GOTO_OPCODE(ip) @ jump to next instruction 721 722/* ------------------------------ */ 723 .balign 64 724.L_OP_CONST_HIGH16: /* 0x15 */ 725/* File: armv5te/OP_CONST_HIGH16.S */ 726 /* const/high16 vAA, #+BBBB0000 */ 727 FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended) 728 mov r3, rINST, lsr #8 @ r3<- AA 729 mov r0, r0, lsl #16 @ r0<- BBBB0000 730 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 731 SET_VREG(r0, r3) @ vAA<- r0 732 GET_INST_OPCODE(ip) @ extract opcode from rINST 733 GOTO_OPCODE(ip) @ jump to next instruction 734 735/* ------------------------------ */ 736 .balign 64 737.L_OP_CONST_WIDE_16: /* 0x16 */ 738/* File: armv5te/OP_CONST_WIDE_16.S */ 739 /* const-wide/16 vAA, #+BBBB */ 740 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 741 mov r3, rINST, lsr #8 @ r3<- AA 742 mov r1, r0, asr #31 @ r1<- ssssssss 743 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 744 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 745 GET_INST_OPCODE(ip) @ extract opcode from rINST 746 stmia r3, {r0-r1} @ vAA<- r0/r1 747 GOTO_OPCODE(ip) @ jump to next instruction 748 749/* ------------------------------ */ 750 .balign 64 751.L_OP_CONST_WIDE_32: /* 0x17 */ 752/* File: armv5te/OP_CONST_WIDE_32.S */ 753 /* const-wide/32 vAA, #+BBBBbbbb */ 754 FETCH(r0, 1) @ r0<- 0000bbbb (low) 755 mov r3, rINST, lsr #8 @ r3<- AA 756 FETCH_S(r2, 2) @ r2<- ssssBBBB (high) 757 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 758 orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb 759 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 760 mov r1, r0, asr #31 @ r1<- ssssssss 761 GET_INST_OPCODE(ip) @ extract opcode from rINST 762 stmia r3, {r0-r1} @ vAA<- r0/r1 763 GOTO_OPCODE(ip) @ jump to next instruction 764 765/* ------------------------------ */ 766 .balign 64 767.L_OP_CONST_WIDE: /* 0x18 */ 768/* File: armv5te/OP_CONST_WIDE.S */ 769 /* const-wide vAA, #+HHHHhhhhBBBBbbbb */ 770 
FETCH(r0, 1) @ r0<- bbbb (low) 771 FETCH(r1, 2) @ r1<- BBBB (low middle) 772 FETCH(r2, 3) @ r2<- hhhh (high middle) 773 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word) 774 FETCH(r3, 4) @ r3<- HHHH (high) 775 mov r9, rINST, lsr #8 @ r9<- AA 776 orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word) 777 FETCH_ADVANCE_INST(5) @ advance rPC, load rINST 778 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 779 GET_INST_OPCODE(ip) @ extract opcode from rINST 780 stmia r9, {r0-r1} @ vAA<- r0/r1 781 GOTO_OPCODE(ip) @ jump to next instruction 782 783/* ------------------------------ */ 784 .balign 64 785.L_OP_CONST_WIDE_HIGH16: /* 0x19 */ 786/* File: armv5te/OP_CONST_WIDE_HIGH16.S */ 787 /* const-wide/high16 vAA, #+BBBB000000000000 */ 788 FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended) 789 mov r3, rINST, lsr #8 @ r3<- AA 790 mov r0, #0 @ r0<- 00000000 791 mov r1, r1, lsl #16 @ r1<- BBBB0000 792 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 793 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 794 GET_INST_OPCODE(ip) @ extract opcode from rINST 795 stmia r3, {r0-r1} @ vAA<- r0/r1 796 GOTO_OPCODE(ip) @ jump to next instruction 797 798/* ------------------------------ */ 799 .balign 64 800.L_OP_CONST_STRING: /* 0x1a */ 801/* File: armv5te/OP_CONST_STRING.S */ 802 /* const/string vAA, String@BBBB */ 803 FETCH(r1, 1) @ r1<- BBBB 804 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 805 mov r9, rINST, lsr #8 @ r9<- AA 806 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 807 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 808 cmp r0, #0 @ not yet resolved? 
809 beq .LOP_CONST_STRING_resolve 810 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 811 GET_INST_OPCODE(ip) @ extract opcode from rINST 812 SET_VREG(r0, r9) @ vAA<- r0 813 GOTO_OPCODE(ip) @ jump to next instruction 814 815/* ------------------------------ */ 816 .balign 64 817.L_OP_CONST_STRING_JUMBO: /* 0x1b */ 818/* File: armv5te/OP_CONST_STRING_JUMBO.S */ 819 /* const/string vAA, String@BBBBBBBB */ 820 FETCH(r0, 1) @ r0<- bbbb (low) 821 FETCH(r1, 2) @ r1<- BBBB (high) 822 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 823 mov r9, rINST, lsr #8 @ r9<- AA 824 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 825 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 826 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 827 cmp r0, #0 828 beq .LOP_CONST_STRING_JUMBO_resolve 829 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 830 GET_INST_OPCODE(ip) @ extract opcode from rINST 831 SET_VREG(r0, r9) @ vAA<- r0 832 GOTO_OPCODE(ip) @ jump to next instruction 833 834/* ------------------------------ */ 835 .balign 64 836.L_OP_CONST_CLASS: /* 0x1c */ 837/* File: armv5te/OP_CONST_CLASS.S */ 838 /* const/class vAA, Class@BBBB */ 839 FETCH(r1, 1) @ r1<- BBBB 840 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 841 mov r9, rINST, lsr #8 @ r9<- AA 842 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses 843 ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB] 844 cmp r0, #0 @ not yet resolved? 845 beq .LOP_CONST_CLASS_resolve 846 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 847 GET_INST_OPCODE(ip) @ extract opcode from rINST 848 SET_VREG(r0, r9) @ vAA<- r0 849 GOTO_OPCODE(ip) @ jump to next instruction 850 851/* ------------------------------ */ 852 .balign 64 853.L_OP_MONITOR_ENTER: /* 0x1d */ 854/* File: armv5te/OP_MONITOR_ENTER.S */ 855 /* 856 * Synchronize on an object. 
857 */ 858 /* monitor-enter vAA */ 859 mov r2, rINST, lsr #8 @ r2<- AA 860 GET_VREG(r1, r2) @ r1<- vAA (object) 861 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 862 cmp r1, #0 @ null object? 863 EXPORT_PC() @ need for precise GC, MONITOR_TRACKING 864 beq common_errNullObject @ null object, throw an exception 865 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 866 bl dvmLockObject @ call(self, obj) 867#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */ 868 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 869 ldr r1, [r0, #offThread_exception] @ check for exception 870 cmp r1, #0 871 bne common_exceptionThrown @ exception raised, bail out 872#endif 873 GET_INST_OPCODE(ip) @ extract opcode from rINST 874 GOTO_OPCODE(ip) @ jump to next instruction 875 876/* ------------------------------ */ 877 .balign 64 878.L_OP_MONITOR_EXIT: /* 0x1e */ 879/* File: armv5te/OP_MONITOR_EXIT.S */ 880 /* 881 * Unlock an object. 882 * 883 * Exceptions that occur when unlocking a monitor need to appear as 884 * if they happened at the following instruction. See the Dalvik 885 * instruction spec. 886 */ 887 /* monitor-exit vAA */ 888 mov r2, rINST, lsr #8 @ r2<- AA 889 EXPORT_PC() @ before fetch: export the PC 890 GET_VREG(r1, r2) @ r1<- vAA (object) 891 cmp r1, #0 @ null object? 892 beq 1f @ yes 893 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 894 bl dvmUnlockObject @ r0<- success for unlock(self, obj) 895 cmp r0, #0 @ failed? 896 FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST 897 beq common_exceptionThrown @ yes, exception is pending 898 GET_INST_OPCODE(ip) @ extract opcode from rINST 899 GOTO_OPCODE(ip) @ jump to next instruction 9001: 901 FETCH_ADVANCE_INST(1) @ advance before throw 902 b common_errNullObject 903 904/* ------------------------------ */ 905 .balign 64 906.L_OP_CHECK_CAST: /* 0x1f */ 907/* File: armv5te/OP_CHECK_CAST.S */ 908 /* 909 * Check to see if a cast from one class to another is allowed. 
910 */ 911 /* check-cast vAA, class@BBBB */ 912 mov r3, rINST, lsr #8 @ r3<- AA 913 FETCH(r2, 1) @ r2<- BBBB 914 GET_VREG(r9, r3) @ r9<- object 915 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex 916 cmp r9, #0 @ is object null? 917 ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses 918 beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds 919 ldr r1, [r0, r2, lsl #2] @ r1<- resolved class 920 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 921 cmp r1, #0 @ have we resolved this before? 922 beq .LOP_CHECK_CAST_resolve @ not resolved, do it now 923.LOP_CHECK_CAST_resolved: 924 cmp r0, r1 @ same class (trivial success)? 925 bne .LOP_CHECK_CAST_fullcheck @ no, do full check 926.LOP_CHECK_CAST_okay: 927 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 928 GET_INST_OPCODE(ip) @ extract opcode from rINST 929 GOTO_OPCODE(ip) @ jump to next instruction 930 931/* ------------------------------ */ 932 .balign 64 933.L_OP_INSTANCE_OF: /* 0x20 */ 934/* File: armv5te/OP_INSTANCE_OF.S */ 935 /* 936 * Check to see if an object reference is an instance of a class. 937 * 938 * Most common situation is a non-null object, being compared against 939 * an already-resolved class. 940 */ 941 /* instance-of vA, vB, class@CCCC */ 942 mov r3, rINST, lsr #12 @ r3<- B 943 mov r9, rINST, lsr #8 @ r9<- A+ 944 GET_VREG(r0, r3) @ r0<- vB (object) 945 and r9, r9, #15 @ r9<- A 946 cmp r0, #0 @ is object null? 947 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex 948 beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0 949 FETCH(r3, 1) @ r3<- CCCC 950 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses 951 ldr r1, [r2, r3, lsl #2] @ r1<- resolved class 952 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 953 cmp r1, #0 @ have we resolved this before? 954 beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now 955.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class 956 cmp r0, r1 @ same class (trivial success)? 
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

/* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv5te/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    /* array-length vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B
    mov     r2, rINST, lsr #8           @ r2<- A+
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    and     r2, r2, #15                 @ r2<- A
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vA<- length
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved: @ r0=class
    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

/* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


/* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /* fill-array-data vAA, +BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
    EXPORT_PC();
    bl      dvmInterpHandleFillArrayData@ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ exception handler can throw
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    mov     r9, r9, lsl #1              @ r9<- byte offset
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: arm-vfp/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;       // unordered (NaN): cmpl biases toward -1
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_FLOAT_finish      @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: arm-vfp/OP_CMPG_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;        // unordered (NaN): cmpg biases toward 1
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_FLOAT_finish      @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: arm-vfp/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;       // unordered (NaN): cmpl biases toward -1
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_DOUBLE_finish     @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: arm-vfp/OP_CMPG_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;        // unordered (NaN): cmpg biases toward 1
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_DOUBLE_finish     @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

/* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv5te/OP_IF_EQ.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv5te/OP_IF_NE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: armv5te/OP_IF_LT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: armv5te/OP_IF_GE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: armv5te/OP_IF_GT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LE: /* 0x37 */
/* File: armv5te/OP_IF_LE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bgt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_EQZ: /* 0x38 */
/* File: armv5te/OP_IF_EQZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_NEZ: /* 0x39 */
/* File: armv5te/OP_IF_NEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LTZ: /* 0x3a */
/* File: armv5te/OP_IF_LTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GEZ: /* 0x3b */
/* File: armv5te/OP_IF_GEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GTZ: /* 0x3c */
/* File: armv5te/OP_IF_GTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LEZ: /* 0x3d */
/* File: armv5te/OP_IF_LEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
 *
 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
 */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bgt     1f                          @ reverse cmp ("gt") holds: branch not taken
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_3E: /* 0x3e */
/* File: armv5te/OP_UNUSED_3E.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_3F: /* 0x3f */
/* File: armv5te/OP_UNUSED_3F.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_40: /* 0x40 */
/* File: armv5te/OP_UNUSED_40.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_41: /* 0x41 */
/* File: armv5te/OP_UNUSED_41.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_42: /* 0x42 */
/* File: armv5te/OP_UNUSED_42.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_43: /* 0x43 */
/* File: armv5te/OP_UNUSED_43.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM


/* ------------------------------ */
    .balign 64
.L_OP_AGET: /* 0x44 */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width (4)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: armv5te/OP_AGET_WIDE.S */
    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width (8)
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .LOP_AGET_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_AGET_OBJECT: /* 0x46 */
/* File: armv5te/OP_AGET_OBJECT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width (4)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BOOLEAN: /* 0x47 */
/* File: armv5te/OP_AGET_BOOLEAN.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrb    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (zero-extend u8)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BYTE: /* 0x48 */
/* File: armv5te/OP_AGET_BYTE.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (sign-extend s8)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_CHAR: /* 0x49 */
/* File: armv5te/OP_AGET_CHAR.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrh    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (zero-extend u16)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_SHORT: /* 0x4a */
/* File: armv5te/OP_AGET_SHORT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (sign-extend s16)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT: /* 0x4b */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width (4)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_APUT_WIDE: /* 0x4c */
/* File: armv5te/OP_APUT_WIDE.S */
    /*
     * Array put, 64 bits.  vBB[vCC] <- vAA.
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
     */
    /* aput-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width (8)
    cmp     r1, r3                      @ compare unsigned index, length
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    bcc     .LOP_APUT_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_APUT_OBJECT: /* 0x4d */
/* File: armv5te/OP_APUT_OBJECT.S */
    /*
     * Store an object into an array.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r1, r2)                    @ r1<- vBB (array object)
    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
    cmp     r1, #0                      @ null array object?
    GET_VREG(r9, r9)                    @ r9<- vAA (overwrites index reg AA)
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r1, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r10, r1, r0, lsl #2         @ r10<- arrayObj + index*width (4)
    cmp     r0, r3                      @ compare unsigned index, length
    bcc     .LOP_APUT_OBJECT_finish     @ we're okay, continue on
    b       common_errArrayIndex        @ index >= length, bail


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BOOLEAN: /* 0x4e */
/* File: armv5te/OP_APUT_BOOLEAN.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2 (low byte)
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BYTE: /* 0x4f */
/* File: armv5te/OP_APUT_BYTE.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2 (low byte)
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_CHAR: /* 0x50 */
/* File: armv5te/OP_APUT_CHAR.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2 (low halfword)
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_SHORT: /* 0x51 */
/* File: armv5te/OP_APUT_SHORT.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2 (low halfword)
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET: /* 0x52 */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: armv5te/OP_IGET_WIDE.S */
    /*
     * 64-bit instance field get.  (Comment said "Wide 32-bit"; the width
     * here is 64 bits, as the "wide" opcode name indicates.)
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_WIDE_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: armv5te/OP_IGET_OBJECT.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_OBJECT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: armv5te/OP_IGET_BOOLEAN.S */
@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BOOLEAN_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BYTE: /* 0x56 */
/* File: armv5te/OP_IGET_BYTE.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BYTE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_CHAR: /* 0x57 */
/* File: armv5te/OP_IGET_CHAR.S */
@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_CHAR_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_SHORT: /* 0x58 */
/* File: armv5te/OP_IGET_SHORT.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_SHORT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT: /* 0x59 */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_finish            @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE: /* 0x5a */
/* File: armv5te/OP_IPUT_WIDE.S */
    /* iput-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_WIDE_finish       @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT: /* 0x5b */
/* File: armv5te/OP_IPUT_OBJECT.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
 *
 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
 */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_OBJECT_finish     @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BOOLEAN: /* 0x5c */
/* File: armv5te/OP_IPUT_BOOLEAN.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BOOLEAN_finish    @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BYTE: /* 0x5d */
/* File: armv5te/OP_IPUT_BYTE.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method (resolve path)
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BYTE_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_CHAR: /* 0x5e */
/* File: armv5te/OP_IPUT_CHAR.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
2729 * 2730 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2731 */ 2732 /* op vA, vB, field@CCCC */ 2733 mov r0, rINST, lsr #12 @ r0<- B 2734 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2735 FETCH(r1, 1) @ r1<- field ref CCCC 2736 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2737 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2738 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2739 cmp r0, #0 @ is resolved entry null? 2740 bne .LOP_IPUT_CHAR_finish @ no, already resolved 27418: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2742 EXPORT_PC() @ resolve() could throw 2743 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2744 bl dvmResolveInstField @ r0<- resolved InstField ptr 2745 cmp r0, #0 @ success? 2746 bne .LOP_IPUT_CHAR_finish @ yes, finish up 2747 b common_exceptionThrown 2748 2749 2750/* ------------------------------ */ 2751 .balign 64 2752.L_OP_IPUT_SHORT: /* 0x5f */ 2753/* File: armv5te/OP_IPUT_SHORT.S */ 2754@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" } 2755/* File: armv5te/OP_IPUT.S */ 2756 /* 2757 * General 32-bit instance field put. 2758 * 2759 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2760 */ 2761 /* op vA, vB, field@CCCC */ 2762 mov r0, rINST, lsr #12 @ r0<- B 2763 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2764 FETCH(r1, 1) @ r1<- field ref CCCC 2765 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2766 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2767 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2768 cmp r0, #0 @ is resolved entry null? 2769 bne .LOP_IPUT_SHORT_finish @ no, already resolved 27708: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2771 EXPORT_PC() @ resolve() could throw 2772 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2773 bl dvmResolveInstField @ r0<- resolved InstField ptr 2774 cmp r0, #0 @ success? 
2775 bne .LOP_IPUT_SHORT_finish @ yes, finish up 2776 b common_exceptionThrown 2777 2778 2779/* ------------------------------ */ 2780 .balign 64 2781.L_OP_SGET: /* 0x60 */ 2782/* File: armv5te/OP_SGET.S */ 2783 /* 2784 * General 32-bit SGET handler. 2785 * 2786 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2787 */ 2788 /* op vAA, field@BBBB */ 2789 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2790 FETCH(r1, 1) @ r1<- field ref BBBB 2791 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2792 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2793 cmp r0, #0 @ is resolved entry null? 2794 beq .LOP_SGET_resolve @ yes, do resolve 2795.LOP_SGET_finish: @ field ptr in r0 2796 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2797 @ no-op @ acquiring load 2798 mov r2, rINST, lsr #8 @ r2<- AA 2799 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2800 SET_VREG(r1, r2) @ fp[AA]<- r1 2801 GET_INST_OPCODE(ip) @ extract opcode from rINST 2802 GOTO_OPCODE(ip) @ jump to next instruction 2803 2804/* ------------------------------ */ 2805 .balign 64 2806.L_OP_SGET_WIDE: /* 0x61 */ 2807/* File: armv5te/OP_SGET_WIDE.S */ 2808 /* 2809 * 64-bit SGET handler. 2810 */ 2811 /* sget-wide vAA, field@BBBB */ 2812 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2813 FETCH(r1, 1) @ r1<- field ref BBBB 2814 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2815 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2816 cmp r0, #0 @ is resolved entry null? 
2817 beq .LOP_SGET_WIDE_resolve @ yes, do resolve 2818.LOP_SGET_WIDE_finish: 2819 mov r9, rINST, lsr #8 @ r9<- AA 2820 .if 0 2821 add r0, r0, #offStaticField_value @ r0<- pointer to data 2822 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 2823 .else 2824 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 2825 .endif 2826 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2827 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2828 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 2829 GET_INST_OPCODE(ip) @ extract opcode from rINST 2830 GOTO_OPCODE(ip) @ jump to next instruction 2831 2832/* ------------------------------ */ 2833 .balign 64 2834.L_OP_SGET_OBJECT: /* 0x62 */ 2835/* File: armv5te/OP_SGET_OBJECT.S */ 2836/* File: armv5te/OP_SGET.S */ 2837 /* 2838 * General 32-bit SGET handler. 2839 * 2840 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2841 */ 2842 /* op vAA, field@BBBB */ 2843 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2844 FETCH(r1, 1) @ r1<- field ref BBBB 2845 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2846 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2847 cmp r0, #0 @ is resolved entry null? 2848 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve 2849.LOP_SGET_OBJECT_finish: @ field ptr in r0 2850 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2851 @ no-op @ acquiring load 2852 mov r2, rINST, lsr #8 @ r2<- AA 2853 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2854 SET_VREG(r1, r2) @ fp[AA]<- r1 2855 GET_INST_OPCODE(ip) @ extract opcode from rINST 2856 GOTO_OPCODE(ip) @ jump to next instruction 2857 2858 2859/* ------------------------------ */ 2860 .balign 64 2861.L_OP_SGET_BOOLEAN: /* 0x63 */ 2862/* File: armv5te/OP_SGET_BOOLEAN.S */ 2863/* File: armv5te/OP_SGET.S */ 2864 /* 2865 * General 32-bit SGET handler. 
2866 * 2867 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2868 */ 2869 /* op vAA, field@BBBB */ 2870 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2871 FETCH(r1, 1) @ r1<- field ref BBBB 2872 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2873 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2874 cmp r0, #0 @ is resolved entry null? 2875 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve 2876.LOP_SGET_BOOLEAN_finish: @ field ptr in r0 2877 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2878 @ no-op @ acquiring load 2879 mov r2, rINST, lsr #8 @ r2<- AA 2880 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2881 SET_VREG(r1, r2) @ fp[AA]<- r1 2882 GET_INST_OPCODE(ip) @ extract opcode from rINST 2883 GOTO_OPCODE(ip) @ jump to next instruction 2884 2885 2886/* ------------------------------ */ 2887 .balign 64 2888.L_OP_SGET_BYTE: /* 0x64 */ 2889/* File: armv5te/OP_SGET_BYTE.S */ 2890/* File: armv5te/OP_SGET.S */ 2891 /* 2892 * General 32-bit SGET handler. 2893 * 2894 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2895 */ 2896 /* op vAA, field@BBBB */ 2897 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2898 FETCH(r1, 1) @ r1<- field ref BBBB 2899 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2900 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2901 cmp r0, #0 @ is resolved entry null? 
2902 beq .LOP_SGET_BYTE_resolve @ yes, do resolve 2903.LOP_SGET_BYTE_finish: @ field ptr in r0 2904 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2905 @ no-op @ acquiring load 2906 mov r2, rINST, lsr #8 @ r2<- AA 2907 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2908 SET_VREG(r1, r2) @ fp[AA]<- r1 2909 GET_INST_OPCODE(ip) @ extract opcode from rINST 2910 GOTO_OPCODE(ip) @ jump to next instruction 2911 2912 2913/* ------------------------------ */ 2914 .balign 64 2915.L_OP_SGET_CHAR: /* 0x65 */ 2916/* File: armv5te/OP_SGET_CHAR.S */ 2917/* File: armv5te/OP_SGET.S */ 2918 /* 2919 * General 32-bit SGET handler. 2920 * 2921 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2922 */ 2923 /* op vAA, field@BBBB */ 2924 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2925 FETCH(r1, 1) @ r1<- field ref BBBB 2926 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2927 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2928 cmp r0, #0 @ is resolved entry null? 2929 beq .LOP_SGET_CHAR_resolve @ yes, do resolve 2930.LOP_SGET_CHAR_finish: @ field ptr in r0 2931 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2932 @ no-op @ acquiring load 2933 mov r2, rINST, lsr #8 @ r2<- AA 2934 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2935 SET_VREG(r1, r2) @ fp[AA]<- r1 2936 GET_INST_OPCODE(ip) @ extract opcode from rINST 2937 GOTO_OPCODE(ip) @ jump to next instruction 2938 2939 2940/* ------------------------------ */ 2941 .balign 64 2942.L_OP_SGET_SHORT: /* 0x66 */ 2943/* File: armv5te/OP_SGET_SHORT.S */ 2944/* File: armv5te/OP_SGET.S */ 2945 /* 2946 * General 32-bit SGET handler. 
2947 * 2948 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2949 */ 2950 /* op vAA, field@BBBB */ 2951 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2952 FETCH(r1, 1) @ r1<- field ref BBBB 2953 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2954 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2955 cmp r0, #0 @ is resolved entry null? 2956 beq .LOP_SGET_SHORT_resolve @ yes, do resolve 2957.LOP_SGET_SHORT_finish: @ field ptr in r0 2958 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2959 @ no-op @ acquiring load 2960 mov r2, rINST, lsr #8 @ r2<- AA 2961 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2962 SET_VREG(r1, r2) @ fp[AA]<- r1 2963 GET_INST_OPCODE(ip) @ extract opcode from rINST 2964 GOTO_OPCODE(ip) @ jump to next instruction 2965 2966 2967/* ------------------------------ */ 2968 .balign 64 2969.L_OP_SPUT: /* 0x67 */ 2970/* File: armv5te/OP_SPUT.S */ 2971 /* 2972 * General 32-bit SPUT handler. 2973 * 2974 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 2975 */ 2976 /* op vAA, field@BBBB */ 2977 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2978 FETCH(r1, 1) @ r1<- field ref BBBB 2979 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2980 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2981 cmp r0, #0 @ is resolved entry null? 2982 beq .LOP_SPUT_resolve @ yes, do resolve 2983.LOP_SPUT_finish: @ field ptr in r0 2984 mov r2, rINST, lsr #8 @ r2<- AA 2985 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2986 GET_VREG(r1, r2) @ r1<- fp[AA] 2987 GET_INST_OPCODE(ip) @ extract opcode from rINST 2988 @ no-op @ releasing store 2989 str r1, [r0, #offStaticField_value] @ field<- vAA 2990 GOTO_OPCODE(ip) @ jump to next instruction 2991 2992/* ------------------------------ */ 2993 .balign 64 2994.L_OP_SPUT_WIDE: /* 0x68 */ 2995/* File: armv5te/OP_SPUT_WIDE.S */ 2996 /* 2997 * 64-bit SPUT handler. 
2998 */ 2999 /* sput-wide vAA, field@BBBB */ 3000 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 3001 FETCH(r1, 1) @ r1<- field ref BBBB 3002 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 3003 mov r9, rINST, lsr #8 @ r9<- AA 3004 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 3005 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 3006 cmp r2, #0 @ is resolved entry null? 3007 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve 3008.LOP_SPUT_WIDE_finish: @ field ptr in r2, AA in r9 3009 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3010 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 3011 GET_INST_OPCODE(r10) @ extract opcode from rINST 3012 .if 0 3013 add r2, r2, #offStaticField_value @ r2<- pointer to data 3014 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 3015 .else 3016 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 3017 .endif 3018 GOTO_OPCODE(r10) @ jump to next instruction 3019 3020/* ------------------------------ */ 3021 .balign 64 3022.L_OP_SPUT_OBJECT: /* 0x69 */ 3023/* File: armv5te/OP_SPUT_OBJECT.S */ 3024/* File: armv5te/OP_SPUT.S */ 3025 /* 3026 * General 32-bit SPUT handler. 3027 * 3028 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3029 */ 3030 /* op vAA, field@BBBB */ 3031 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3032 FETCH(r1, 1) @ r1<- field ref BBBB 3033 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3034 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3035 cmp r0, #0 @ is resolved entry null? 
3036 beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve 3037.LOP_SPUT_OBJECT_finish: @ field ptr in r0 3038 mov r2, rINST, lsr #8 @ r2<- AA 3039 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3040 GET_VREG(r1, r2) @ r1<- fp[AA] 3041 GET_INST_OPCODE(ip) @ extract opcode from rINST 3042 @ no-op @ releasing store 3043 str r1, [r0, #offStaticField_value] @ field<- vAA 3044 GOTO_OPCODE(ip) @ jump to next instruction 3045 3046 3047/* ------------------------------ */ 3048 .balign 64 3049.L_OP_SPUT_BOOLEAN: /* 0x6a */ 3050/* File: armv5te/OP_SPUT_BOOLEAN.S */ 3051/* File: armv5te/OP_SPUT.S */ 3052 /* 3053 * General 32-bit SPUT handler. 3054 * 3055 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3056 */ 3057 /* op vAA, field@BBBB */ 3058 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3059 FETCH(r1, 1) @ r1<- field ref BBBB 3060 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3061 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3062 cmp r0, #0 @ is resolved entry null? 3063 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve 3064.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0 3065 mov r2, rINST, lsr #8 @ r2<- AA 3066 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3067 GET_VREG(r1, r2) @ r1<- fp[AA] 3068 GET_INST_OPCODE(ip) @ extract opcode from rINST 3069 @ no-op @ releasing store 3070 str r1, [r0, #offStaticField_value] @ field<- vAA 3071 GOTO_OPCODE(ip) @ jump to next instruction 3072 3073 3074/* ------------------------------ */ 3075 .balign 64 3076.L_OP_SPUT_BYTE: /* 0x6b */ 3077/* File: armv5te/OP_SPUT_BYTE.S */ 3078/* File: armv5te/OP_SPUT.S */ 3079 /* 3080 * General 32-bit SPUT handler. 
3081 * 3082 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3083 */ 3084 /* op vAA, field@BBBB */ 3085 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3086 FETCH(r1, 1) @ r1<- field ref BBBB 3087 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3088 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3089 cmp r0, #0 @ is resolved entry null? 3090 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve 3091.LOP_SPUT_BYTE_finish: @ field ptr in r0 3092 mov r2, rINST, lsr #8 @ r2<- AA 3093 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3094 GET_VREG(r1, r2) @ r1<- fp[AA] 3095 GET_INST_OPCODE(ip) @ extract opcode from rINST 3096 @ no-op @ releasing store 3097 str r1, [r0, #offStaticField_value] @ field<- vAA 3098 GOTO_OPCODE(ip) @ jump to next instruction 3099 3100 3101/* ------------------------------ */ 3102 .balign 64 3103.L_OP_SPUT_CHAR: /* 0x6c */ 3104/* File: armv5te/OP_SPUT_CHAR.S */ 3105/* File: armv5te/OP_SPUT.S */ 3106 /* 3107 * General 32-bit SPUT handler. 3108 * 3109 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3110 */ 3111 /* op vAA, field@BBBB */ 3112 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3113 FETCH(r1, 1) @ r1<- field ref BBBB 3114 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3115 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3116 cmp r0, #0 @ is resolved entry null? 
3117 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve 3118.LOP_SPUT_CHAR_finish: @ field ptr in r0 3119 mov r2, rINST, lsr #8 @ r2<- AA 3120 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3121 GET_VREG(r1, r2) @ r1<- fp[AA] 3122 GET_INST_OPCODE(ip) @ extract opcode from rINST 3123 @ no-op @ releasing store 3124 str r1, [r0, #offStaticField_value] @ field<- vAA 3125 GOTO_OPCODE(ip) @ jump to next instruction 3126 3127 3128/* ------------------------------ */ 3129 .balign 64 3130.L_OP_SPUT_SHORT: /* 0x6d */ 3131/* File: armv5te/OP_SPUT_SHORT.S */ 3132/* File: armv5te/OP_SPUT.S */ 3133 /* 3134 * General 32-bit SPUT handler. 3135 * 3136 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3137 */ 3138 /* op vAA, field@BBBB */ 3139 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3140 FETCH(r1, 1) @ r1<- field ref BBBB 3141 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3142 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3143 cmp r0, #0 @ is resolved entry null? 3144 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve 3145.LOP_SPUT_SHORT_finish: @ field ptr in r0 3146 mov r2, rINST, lsr #8 @ r2<- AA 3147 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3148 GET_VREG(r1, r2) @ r1<- fp[AA] 3149 GET_INST_OPCODE(ip) @ extract opcode from rINST 3150 @ no-op @ releasing store 3151 str r1, [r0, #offStaticField_value] @ field<- vAA 3152 GOTO_OPCODE(ip) @ jump to next instruction 3153 3154 3155/* ------------------------------ */ 3156 .balign 64 3157.L_OP_INVOKE_VIRTUAL: /* 0x6e */ 3158/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3159 /* 3160 * Handle a virtual method call. 
3161 * 3162 * for: invoke-virtual, invoke-virtual/range 3163 */ 3164 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3165 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3166 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3167 FETCH(r1, 1) @ r1<- BBBB 3168 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3169 FETCH(r10, 2) @ r10<- GFED or CCCC 3170 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3171 .if (!0) 3172 and r10, r10, #15 @ r10<- D (or stays CCCC) 3173 .endif 3174 cmp r0, #0 @ already resolved? 3175 EXPORT_PC() @ must export for invoke 3176 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on 3177 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3178 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3179 mov r2, #METHOD_VIRTUAL @ resolver method type 3180 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3181 cmp r0, #0 @ got null? 3182 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue 3183 b common_exceptionThrown @ yes, handle exception 3184 3185/* ------------------------------ */ 3186 .balign 64 3187.L_OP_INVOKE_SUPER: /* 0x6f */ 3188/* File: armv5te/OP_INVOKE_SUPER.S */ 3189 /* 3190 * Handle a "super" method call. 3191 * 3192 * for: invoke-super, invoke-super/range 3193 */ 3194 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3195 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3196 FETCH(r10, 2) @ r10<- GFED or CCCC 3197 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3198 .if (!0) 3199 and r10, r10, #15 @ r10<- D (or stays CCCC) 3200 .endif 3201 FETCH(r1, 1) @ r1<- BBBB 3202 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3203 GET_VREG(r2, r10) @ r2<- "this" ptr 3204 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3205 cmp r2, #0 @ null "this"? 3206 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3207 beq common_errNullObject @ null "this", throw exception 3208 cmp r0, #0 @ already resolved? 
3209 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3210 EXPORT_PC() @ must export for invoke 3211 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on 3212 b .LOP_INVOKE_SUPER_resolve @ do resolve now 3213 3214/* ------------------------------ */ 3215 .balign 64 3216.L_OP_INVOKE_DIRECT: /* 0x70 */ 3217/* File: armv5te/OP_INVOKE_DIRECT.S */ 3218 /* 3219 * Handle a direct method call. 3220 * 3221 * (We could defer the "is 'this' pointer null" test to the common 3222 * method invocation code, and use a flag to indicate that static 3223 * calls don't count. If we do this as part of copying the arguments 3224 * out we could avoiding loading the first arg twice.) 3225 * 3226 * for: invoke-direct, invoke-direct/range 3227 */ 3228 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3229 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3230 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3231 FETCH(r1, 1) @ r1<- BBBB 3232 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3233 FETCH(r10, 2) @ r10<- GFED or CCCC 3234 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3235 .if (!0) 3236 and r10, r10, #15 @ r10<- D (or stays CCCC) 3237 .endif 3238 cmp r0, #0 @ already resolved? 3239 EXPORT_PC() @ must export for invoke 3240 GET_VREG(r2, r10) @ r2<- "this" ptr 3241 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now 3242.LOP_INVOKE_DIRECT_finish: 3243 cmp r2, #0 @ null "this" ref? 3244 bne common_invokeMethodNoRange @ no, continue on 3245 b common_errNullObject @ yes, throw exception 3246 3247/* ------------------------------ */ 3248 .balign 64 3249.L_OP_INVOKE_STATIC: /* 0x71 */ 3250/* File: armv5te/OP_INVOKE_STATIC.S */ 3251 /* 3252 * Handle a static method call. 
3253 * 3254 * for: invoke-static, invoke-static/range 3255 */ 3256 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3257 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3258 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3259 FETCH(r1, 1) @ r1<- BBBB 3260 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3261 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3262 cmp r0, #0 @ already resolved? 3263 EXPORT_PC() @ must export for invoke 3264 bne common_invokeMethodNoRange @ yes, continue on 32650: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3266 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3267 mov r2, #METHOD_STATIC @ resolver method type 3268 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3269 cmp r0, #0 @ got null? 3270 bne common_invokeMethodNoRange @ no, continue 3271 b common_exceptionThrown @ yes, handle exception 3272 3273/* ------------------------------ */ 3274 .balign 64 3275.L_OP_INVOKE_INTERFACE: /* 0x72 */ 3276/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3277 /* 3278 * Handle an interface method call. 3279 * 3280 * for: invoke-interface, invoke-interface/range 3281 */ 3282 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3283 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3284 FETCH(r2, 2) @ r2<- FEDC or CCCC 3285 FETCH(r1, 1) @ r1<- BBBB 3286 .if (!0) 3287 and r2, r2, #15 @ r2<- C (or stays CCCC) 3288 .endif 3289 EXPORT_PC() @ must export for invoke 3290 GET_VREG(r0, r2) @ r0<- first arg ("this") 3291 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3292 cmp r0, #0 @ null obj? 3293 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3294 beq common_errNullObject @ yes, fail 3295 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3296 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3297 cmp r0, #0 @ failed? 
3298 beq common_exceptionThrown @ yes, handle exception 3299 b common_invokeMethodNoRange @ jump to common handler 3300 3301/* ------------------------------ */ 3302 .balign 64 3303.L_OP_UNUSED_73: /* 0x73 */ 3304/* File: armv5te/OP_UNUSED_73.S */ 3305/* File: armv5te/unused.S */ 3306 bl common_abort 3307 3308 3309/* ------------------------------ */ 3310 .balign 64 3311.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */ 3312/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */ 3313/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3314 /* 3315 * Handle a virtual method call. 3316 * 3317 * for: invoke-virtual, invoke-virtual/range 3318 */ 3319 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3320 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3321 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3322 FETCH(r1, 1) @ r1<- BBBB 3323 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3324 FETCH(r10, 2) @ r10<- GFED or CCCC 3325 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3326 .if (!1) 3327 and r10, r10, #15 @ r10<- D (or stays CCCC) 3328 .endif 3329 cmp r0, #0 @ already resolved? 3330 EXPORT_PC() @ must export for invoke 3331 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on 3332 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3333 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3334 mov r2, #METHOD_VIRTUAL @ resolver method type 3335 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3336 cmp r0, #0 @ got null? 3337 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue 3338 b common_exceptionThrown @ yes, handle exception 3339 3340 3341/* ------------------------------ */ 3342 .balign 64 3343.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */ 3344/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */ 3345/* File: armv5te/OP_INVOKE_SUPER.S */ 3346 /* 3347 * Handle a "super" method call. 
3348 * 3349 * for: invoke-super, invoke-super/range 3350 */ 3351 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3352 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3353 FETCH(r10, 2) @ r10<- GFED or CCCC 3354 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3355 .if (!1) 3356 and r10, r10, #15 @ r10<- D (or stays CCCC) 3357 .endif 3358 FETCH(r1, 1) @ r1<- BBBB 3359 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3360 GET_VREG(r2, r10) @ r2<- "this" ptr 3361 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3362 cmp r2, #0 @ null "this"? 3363 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3364 beq common_errNullObject @ null "this", throw exception 3365 cmp r0, #0 @ already resolved? 3366 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3367 EXPORT_PC() @ must export for invoke 3368 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on 3369 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now 3370 3371 3372/* ------------------------------ */ 3373 .balign 64 3374.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */ 3375/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */ 3376/* File: armv5te/OP_INVOKE_DIRECT.S */ 3377 /* 3378 * Handle a direct method call. 3379 * 3380 * (We could defer the "is 'this' pointer null" test to the common 3381 * method invocation code, and use a flag to indicate that static 3382 * calls don't count. If we do this as part of copying the arguments 3383 * out we could avoiding loading the first arg twice.) 
3384 * 3385 * for: invoke-direct, invoke-direct/range 3386 */ 3387 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3388 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3389 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3390 FETCH(r1, 1) @ r1<- BBBB 3391 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3392 FETCH(r10, 2) @ r10<- GFED or CCCC 3393 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3394 .if (!1) 3395 and r10, r10, #15 @ r10<- D (or stays CCCC) 3396 .endif 3397 cmp r0, #0 @ already resolved? 3398 EXPORT_PC() @ must export for invoke 3399 GET_VREG(r2, r10) @ r2<- "this" ptr 3400 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now 3401.LOP_INVOKE_DIRECT_RANGE_finish: 3402 cmp r2, #0 @ null "this" ref? 3403 bne common_invokeMethodRange @ no, continue on 3404 b common_errNullObject @ yes, throw exception 3405 3406 3407/* ------------------------------ */ 3408 .balign 64 3409.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */ 3410/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */ 3411/* File: armv5te/OP_INVOKE_STATIC.S */ 3412 /* 3413 * Handle a static method call. 3414 * 3415 * for: invoke-static, invoke-static/range 3416 */ 3417 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3418 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3419 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3420 FETCH(r1, 1) @ r1<- BBBB 3421 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3422 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3423 cmp r0, #0 @ already resolved? 3424 EXPORT_PC() @ must export for invoke 3425 bne common_invokeMethodRange @ yes, continue on 34260: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3427 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3428 mov r2, #METHOD_STATIC @ resolver method type 3429 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3430 cmp r0, #0 @ got null? 
3431 bne common_invokeMethodRange @ no, continue 3432 b common_exceptionThrown @ yes, handle exception 3433 3434 3435/* ------------------------------ */ 3436 .balign 64 3437.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */ 3438/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */ 3439/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3440 /* 3441 * Handle an interface method call. 3442 * 3443 * for: invoke-interface, invoke-interface/range 3444 */ 3445 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3446 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3447 FETCH(r2, 2) @ r2<- FEDC or CCCC 3448 FETCH(r1, 1) @ r1<- BBBB 3449 .if (!1) 3450 and r2, r2, #15 @ r2<- C (or stays CCCC) 3451 .endif 3452 EXPORT_PC() @ must export for invoke 3453 GET_VREG(r0, r2) @ r0<- first arg ("this") 3454 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3455 cmp r0, #0 @ null obj? 3456 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3457 beq common_errNullObject @ yes, fail 3458 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3459 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3460 cmp r0, #0 @ failed? 3461 beq common_exceptionThrown @ yes, handle exception 3462 b common_invokeMethodRange @ jump to common handler 3463 3464 3465/* ------------------------------ */ 3466 .balign 64 3467.L_OP_UNUSED_79: /* 0x79 */ 3468/* File: armv5te/OP_UNUSED_79.S */ 3469/* File: armv5te/unused.S */ 3470 bl common_abort 3471 3472 3473/* ------------------------------ */ 3474 .balign 64 3475.L_OP_UNUSED_7A: /* 0x7a */ 3476/* File: armv5te/OP_UNUSED_7A.S */ 3477/* File: armv5te/unused.S */ 3478 bl common_abort 3479 3480 3481/* ------------------------------ */ 3482 .balign 64 3483.L_OP_NEG_INT: /* 0x7b */ 3484/* File: armv5te/OP_NEG_INT.S */ 3485/* File: armv5te/unop.S */ 3486 /* 3487 * Generic 32-bit unary operation. Provide an "instr" line that 3488 * specifies an instruction that performs "result = op r0". 3489 * This could be an ARM instruction or a function call. 
3490 * 3491 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3492 * int-to-byte, int-to-char, int-to-short 3493 */ 3494 /* unop vA, vB */ 3495 mov r3, rINST, lsr #12 @ r3<- B 3496 mov r9, rINST, lsr #8 @ r9<- A+ 3497 GET_VREG(r0, r3) @ r0<- vB 3498 and r9, r9, #15 3499 @ optional op; may set condition codes 3500 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3501 rsb r0, r0, #0 @ r0<- op, r0-r3 changed 3502 GET_INST_OPCODE(ip) @ extract opcode from rINST 3503 SET_VREG(r0, r9) @ vAA<- r0 3504 GOTO_OPCODE(ip) @ jump to next instruction 3505 /* 9-10 instructions */ 3506 3507 3508/* ------------------------------ */ 3509 .balign 64 3510.L_OP_NOT_INT: /* 0x7c */ 3511/* File: armv5te/OP_NOT_INT.S */ 3512/* File: armv5te/unop.S */ 3513 /* 3514 * Generic 32-bit unary operation. Provide an "instr" line that 3515 * specifies an instruction that performs "result = op r0". 3516 * This could be an ARM instruction or a function call. 3517 * 3518 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3519 * int-to-byte, int-to-char, int-to-short 3520 */ 3521 /* unop vA, vB */ 3522 mov r3, rINST, lsr #12 @ r3<- B 3523 mov r9, rINST, lsr #8 @ r9<- A+ 3524 GET_VREG(r0, r3) @ r0<- vB 3525 and r9, r9, #15 3526 @ optional op; may set condition codes 3527 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3528 mvn r0, r0 @ r0<- op, r0-r3 changed 3529 GET_INST_OPCODE(ip) @ extract opcode from rINST 3530 SET_VREG(r0, r9) @ vAA<- r0 3531 GOTO_OPCODE(ip) @ jump to next instruction 3532 /* 9-10 instructions */ 3533 3534 3535/* ------------------------------ */ 3536 .balign 64 3537.L_OP_NEG_LONG: /* 0x7d */ 3538/* File: armv5te/OP_NEG_LONG.S */ 3539/* File: armv5te/unopWide.S */ 3540 /* 3541 * Generic 64-bit unary operation. Provide an "instr" line that 3542 * specifies an instruction that performs "result = op r0/r1". 3543 * This could be an ARM instruction or a function call. 
3544 * 3545 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3546 */ 3547 /* unop vA, vB */ 3548 mov r9, rINST, lsr #8 @ r9<- A+ 3549 mov r3, rINST, lsr #12 @ r3<- B 3550 and r9, r9, #15 3551 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3552 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3553 ldmia r3, {r0-r1} @ r0/r1<- vAA 3554 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3555 rsbs r0, r0, #0 @ optional op; may set condition codes 3556 rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed 3557 GET_INST_OPCODE(ip) @ extract opcode from rINST 3558 stmia r9, {r0-r1} @ vAA<- r0/r1 3559 GOTO_OPCODE(ip) @ jump to next instruction 3560 /* 12-13 instructions */ 3561 3562 3563/* ------------------------------ */ 3564 .balign 64 3565.L_OP_NOT_LONG: /* 0x7e */ 3566/* File: armv5te/OP_NOT_LONG.S */ 3567/* File: armv5te/unopWide.S */ 3568 /* 3569 * Generic 64-bit unary operation. Provide an "instr" line that 3570 * specifies an instruction that performs "result = op r0/r1". 3571 * This could be an ARM instruction or a function call. 3572 * 3573 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3574 */ 3575 /* unop vA, vB */ 3576 mov r9, rINST, lsr #8 @ r9<- A+ 3577 mov r3, rINST, lsr #12 @ r3<- B 3578 and r9, r9, #15 3579 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3580 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3581 ldmia r3, {r0-r1} @ r0/r1<- vAA 3582 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3583 mvn r0, r0 @ optional op; may set condition codes 3584 mvn r1, r1 @ r0/r1<- op, r2-r3 changed 3585 GET_INST_OPCODE(ip) @ extract opcode from rINST 3586 stmia r9, {r0-r1} @ vAA<- r0/r1 3587 GOTO_OPCODE(ip) @ jump to next instruction 3588 /* 12-13 instructions */ 3589 3590 3591/* ------------------------------ */ 3592 .balign 64 3593.L_OP_NEG_FLOAT: /* 0x7f */ 3594/* File: armv5te/OP_NEG_FLOAT.S */ 3595/* File: armv5te/unop.S */ 3596 /* 3597 * Generic 32-bit unary operation. Provide an "instr" line that 3598 * specifies an instruction that performs "result = op r0". 
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    add     r0, r0, #0x80000000         @ r0<- op (flips sign bit 31), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_DOUBLE: /* 0x80 */
/* File: armv5te/OP_NEG_DOUBLE.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    add     r1, r1, #0x80000000         @ r0/r1<- op (flip sign bit of high word), r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_LONG: /* 0x81 */
/* File: armv5te/OP_INT_TO_LONG.S */
/* File: armv5te/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.
     * Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r0, asr #31             @ r1<- op (sign-extension of vB), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_FLOAT: /* 0x82 */
/* File: arm-vfp/OP_INT_TO_FLOAT.S */
/* File: arm-vfp/funop.S */
    /*
     * Generic 32-bit unary floating-point operation.  Provide an "instr"
     * line that specifies an instruction that performs "s1 = op s0".
     *
     * for: int-to-float, float-to-int
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fsitos  s1, s0                      @ s1<- op (signed int -> float)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s1, [r9]                    @ vA<- s1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_DOUBLE: /* 0x83 */
/* File: arm-vfp/OP_INT_TO_DOUBLE.S */
/* File: arm-vfp/funopWider.S */
    /*
     * Generic 32bit-to-64bit floating point unary operation.  Provide an
     * "instr" line that specifies an instruction that performs "d0 = op s0".
     *
     * For: int-to-double, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fsitod  d0, s0                      @ d0<- op (signed int -> double)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fstd    d0, [r9]                    @ vA/vA+1<- d0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_LONG_TO_INT: /* 0x84 */
/* File: armv5te/OP_LONG_TO_INT.S */
/* we ignore the high word, making this equivalent to a 32-bit reg move */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15                 @ r0<- A
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_LONG_TO_FLOAT: /* 0x85 */
/* File: armv5te/OP_LONG_TO_FLOAT.S */
/* File: armv5te/unopNarrower.S */
    /*
     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0/r1", where
     * "result" is a 32-bit quantity in r0.
     *
     * For: long-to-float, double-to-int, double-to-float
     *
     * (This would work for long-to-int, but that instruction is actually
     * an exact match for OP_MOVE.)
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    and     r9, r9, #15                 @ r9<- A
    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    bl      __aeabi_l2f                 @ r0<- op (EABI long -> float), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_LONG_TO_DOUBLE: /* 0x86 */
/* File: armv5te/OP_LONG_TO_DOUBLE.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    bl      __aeabi_l2d                 @ r0/r1<- op (EABI long -> double), r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_FLOAT_TO_INT: /* 0x87 */
/* File: arm-vfp/OP_FLOAT_TO_INT.S */
/* File: arm-vfp/funop.S */
    /*
     * Generic 32-bit unary floating-point operation.  Provide an "instr"
     * line that specifies an instruction that performs "s1 = op s0".
     *
     * for: int-to-float, float-to-int
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    ftosizs s1, s0                      @ s1<- op (float -> int, round toward zero)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s1, [r9]                    @ vA<- s1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_FLOAT_TO_LONG: /* 0x88 */
/* File: armv5te/OP_FLOAT_TO_LONG.S */
@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"}
/* File: armv5te/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      f2l_doconv                  @ r0/r1<- op (note: f2l_doconv, not __aeabi_f2lz), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
/* File: arm-vfp/OP_FLOAT_TO_DOUBLE.S */
/* File: arm-vfp/funopWider.S */
    /*
     * Generic 32bit-to-64bit floating point unary operation.  Provide an
     * "instr" line that specifies an instruction that performs "d0 = op s0".
     *
     * For: int-to-double, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fcvtds  d0, s0                      @ d0<- op (float -> double)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fstd    d0, [r9]                    @ vA/vA+1<- d0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_INT: /* 0x8a */
/* File: arm-vfp/OP_DOUBLE_TO_INT.S */
/* File: arm-vfp/funopNarrower.S */
    /*
     * Generic 64bit-to-32bit unary floating point operation.  Provide an
     * "instr" line that specifies an instruction that performs "s0 = op d0".
     *
     * For: double-to-int, double-to-float
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    fldd    d0, [r3]                    @ d0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    ftosizd s0, d0                      @ s0<- op (double -> int, round toward zero)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s0, [r9]                    @ vA<- s0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_LONG: /* 0x8b */
/* File: armv5te/OP_DOUBLE_TO_LONG.S */
@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"}
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    bl      d2l_doconv                  @ r0/r1<- op (note: d2l_doconv, not __aeabi_d2lz), r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
/* File: arm-vfp/OP_DOUBLE_TO_FLOAT.S */
/* File: arm-vfp/funopNarrower.S */
    /*
     * Generic 64bit-to-32bit unary floating point operation.  Provide an
     * "instr" line that specifies an instruction that performs "s0 = op d0".
     *
     * For: double-to-int, double-to-float
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    fldd    d0, [r3]                    @ d0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fcvtsd  s0, d0                      @ s0<- op (double -> float)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s0, [r9]                    @ vA<- s0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_BYTE: /* 0x8d */
/* File: armv5te/OP_INT_TO_BYTE.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    mov     r0, r0, asl #24             @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r0, r0, asr #24             @ r0<- op (sign-extend low byte), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_CHAR: /* 0x8e */
/* File: armv5te/OP_INT_TO_CHAR.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    mov     r0, r0, asl #16             @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r0, r0, lsr #16             @ r0<- op (zero-extend low halfword), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_SHORT: /* 0x8f */
/* File: armv5te/OP_INT_TO_SHORT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    mov     r0, r0, asl #16             @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r0, r0, asr #16             @ r0<- op (sign-extend low halfword), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT: /* 0x90 */
/* File: armv5te/OP_ADD_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op (vBB + vCC), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT: /* 0x91 */
/* File: armv5te/OP_SUB_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op (vBB - vCC), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT: /* 0x92 */
/* File: armv5te/OP_MUL_INT.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op (vBB * vCC; Rd must differ from Rm pre-ARMv6), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT: /* 0x93 */
/* File: armv5te/OP_DIV_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op (vBB / vCC), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT: /* 0x94 */
/* File: armv5te/OP_REM_INT.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op (remainder of vBB / vCC), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1 (remainder, not quotient)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT: /* 0x95 */
/* File: armv5te/OP_AND_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op (vBB & vCC), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT: /* 0x96 */
/* File: armv5te/OP_OR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op (vBB | vCC), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT: /* 0x97 */
/* File: armv5te/OP_XOR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op (vBB ^ vCC), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT: /* 0x98 */
/* File: armv5te/OP_SHL_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; mask shift count to 0-31 per Dalvik semantics
    mov     r0, r0, asl r1              @ r0<- op (vBB << vCC), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT: /* 0x99 */
/* File: armv5te/OP_SHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; mask shift count to 0-31 per Dalvik semantics
    mov     r0, r0, asr r1              @ r0<- op (vBB >> vCC, arithmetic), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT: /* 0x9a */
/* File: armv5te/OP_USHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ mask shift distance to 0..31 (Dalvik semantics)
    mov     r0, r0, lsr r1              @ r0<- op (logical shift right), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_LONG: /* 0x9b */
/* File: armv5te/OP_ADD_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0: zero check assembled out
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    adds    r0, r0, r2                  @ low word add; sets carry for the high word
    adc     r1, r1, r3                  @ high word add with carry; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG: /* 0x9c */
/* File: armv5te/OP_SUB_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0: zero check assembled out
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ low word subtract; sets borrow for the high word
    sbc     r1, r1, r3                  @ high word subtract with borrow; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG: /* 0x9d */
/* File: armv5te/OP_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
     */
    /* mul-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST, lsr #8           @ r0<- AA
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    add     r0, rFP, r0, lsl #2         @ r0<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_MUL_LONG_finish        @ store r9/r10 to vAA/vAA+1 out of line

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG: /* 0x9e */
/* File: armv5te/OP_DIV_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1                               @ chkzero==1: throw on divide-by-zero
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ EABI: quotient returned in r0/r1 (remainder in r2/r3, unused here)
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG: /* 0x9f */
/* File: armv5te/OP_REM_LONG.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1                               @ chkzero==1: throw on divide-by-zero
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ ldivmod puts the remainder in r2/r3 (see note above)
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vAA/vAA+1<- r2/r3 (remainder)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG: /* 0xa0 */
/* File: armv5te/OP_AND_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0: zero check assembled out
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ low word AND
    and     r1, r1, r3                  @ high word AND; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG: /* 0xa1 */
/* File: armv5te/OP_OR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0: zero check assembled out
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ low word OR
    orr     r1, r1, r3                  @ high word OR; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG: /* 0xa2 */
/* File: armv5te/OP_XOR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0: zero check assembled out
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ low word XOR
    eor     r1, r1, r3                  @ high word XOR; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG: /* 0xa3 */
/* File: armv5te/OP_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2)): carry low bits into high
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish        @ compute low word and store out of line

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG: /* 0xa4 */
/* File: armv5te/OP_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2 (low word: logical shift)
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2)): carry high bits into low
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHR_LONG_finish        @ compute high word and store out of line

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG: /* 0xa5 */
/* File: armv5te/OP_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* ushr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2 (low word: logical shift)
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2)): carry high bits into low
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_USHR_LONG_finish       @ compute high word and store out of line

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT: /* 0xa6 */
/* File: arm-vfp/OP_ADD_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fadds   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT: /* 0xa7 */
/* File: arm-vfp/OP_SUB_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT: /* 0xa8 */
/* File: arm-vfp/OP_MUL_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuls   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT: /* 0xa9 */
/* File: arm-vfp/OP_DIV_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT: /* 0xaa */
/* File: armv5te/OP_REM_FLOAT.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0: zero check assembled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ softfp: float args passed/returned in core registers (r0/r1)
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE: /* 0xab */
/* File: arm-vfp/OP_ADD_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    faddd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE: /* 0xac */
/* File: arm-vfp/OP_SUB_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE: /* 0xad */
/* File: arm-vfp/OP_MUL_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuld   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE: /* 0xae */
/* File: arm-vfp/OP_DIV_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE: /* 0xaf */
/* File: armv5te/OP_REM_DOUBLE.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0: zero check assembled out
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ softfp: double args passed/returned in core register pairs (r0/r1, r2/r3)
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_2ADDR: /* 0xb0 */
/* File: armv5te/OP_ADD_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: zero check assembled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT_2ADDR: /* 0xb1 */
/* File: armv5te/OP_SUB_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: zero check assembled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_2ADDR: /* 0xb2 */
/* File: armv5te/OP_MUL_INT_2ADDR.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: zero check assembled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed (Rd != Rm constraint)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_2ADDR: /* 0xb3 */
/* File: armv5te/OP_DIV_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1                               @ chkzero==1: throw on divide-by-zero
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ EABI integer divide helper; quotient returned in r0
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_2ADDR: /* 0xb4 */
/* File: armv5te/OP_REM_INT_2ADDR.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1                               @ chkzero==1: throw on divide-by-zero
    cmp     r1, #0                      @ is second operand zero?
5439 beq common_errDivideByZero 5440 .endif 5441 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5442 5443 @ optional op; may set condition codes 5444 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 5445 GET_INST_OPCODE(ip) @ extract opcode from rINST 5446 SET_VREG(r1, r9) @ vAA<- r1 5447 GOTO_OPCODE(ip) @ jump to next instruction 5448 /* 10-13 instructions */ 5449 5450 5451/* ------------------------------ */ 5452 .balign 64 5453.L_OP_AND_INT_2ADDR: /* 0xb5 */ 5454/* File: armv5te/OP_AND_INT_2ADDR.S */ 5455/* File: armv5te/binop2addr.S */ 5456 /* 5457 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5458 * that specifies an instruction that performs "result = r0 op r1". 5459 * This could be an ARM instruction or a function call. (If the result 5460 * comes back in a register other than r0, you can override "result".) 5461 * 5462 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5463 * vCC (r1). Useful for integer division and modulus. 5464 * 5465 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5466 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5467 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5468 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5469 */ 5470 /* binop/2addr vA, vB */ 5471 mov r9, rINST, lsr #8 @ r9<- A+ 5472 mov r3, rINST, lsr #12 @ r3<- B 5473 and r9, r9, #15 5474 GET_VREG(r1, r3) @ r1<- vB 5475 GET_VREG(r0, r9) @ r0<- vA 5476 .if 0 5477 cmp r1, #0 @ is second operand zero? 
5478 beq common_errDivideByZero 5479 .endif 5480 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5481 5482 @ optional op; may set condition codes 5483 and r0, r0, r1 @ r0<- op, r0-r3 changed 5484 GET_INST_OPCODE(ip) @ extract opcode from rINST 5485 SET_VREG(r0, r9) @ vAA<- r0 5486 GOTO_OPCODE(ip) @ jump to next instruction 5487 /* 10-13 instructions */ 5488 5489 5490/* ------------------------------ */ 5491 .balign 64 5492.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5493/* File: armv5te/OP_OR_INT_2ADDR.S */ 5494/* File: armv5te/binop2addr.S */ 5495 /* 5496 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5497 * that specifies an instruction that performs "result = r0 op r1". 5498 * This could be an ARM instruction or a function call. (If the result 5499 * comes back in a register other than r0, you can override "result".) 5500 * 5501 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5502 * vCC (r1). Useful for integer division and modulus. 5503 * 5504 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5505 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5506 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5507 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5508 */ 5509 /* binop/2addr vA, vB */ 5510 mov r9, rINST, lsr #8 @ r9<- A+ 5511 mov r3, rINST, lsr #12 @ r3<- B 5512 and r9, r9, #15 5513 GET_VREG(r1, r3) @ r1<- vB 5514 GET_VREG(r0, r9) @ r0<- vA 5515 .if 0 5516 cmp r1, #0 @ is second operand zero? 
5517 beq common_errDivideByZero 5518 .endif 5519 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5520 5521 @ optional op; may set condition codes 5522 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5523 GET_INST_OPCODE(ip) @ extract opcode from rINST 5524 SET_VREG(r0, r9) @ vAA<- r0 5525 GOTO_OPCODE(ip) @ jump to next instruction 5526 /* 10-13 instructions */ 5527 5528 5529/* ------------------------------ */ 5530 .balign 64 5531.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5532/* File: armv5te/OP_XOR_INT_2ADDR.S */ 5533/* File: armv5te/binop2addr.S */ 5534 /* 5535 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5536 * that specifies an instruction that performs "result = r0 op r1". 5537 * This could be an ARM instruction or a function call. (If the result 5538 * comes back in a register other than r0, you can override "result".) 5539 * 5540 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5541 * vCC (r1). Useful for integer division and modulus. 5542 * 5543 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5544 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5545 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5546 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5547 */ 5548 /* binop/2addr vA, vB */ 5549 mov r9, rINST, lsr #8 @ r9<- A+ 5550 mov r3, rINST, lsr #12 @ r3<- B 5551 and r9, r9, #15 5552 GET_VREG(r1, r3) @ r1<- vB 5553 GET_VREG(r0, r9) @ r0<- vA 5554 .if 0 5555 cmp r1, #0 @ is second operand zero? 
5556 beq common_errDivideByZero 5557 .endif 5558 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5559 5560 @ optional op; may set condition codes 5561 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5562 GET_INST_OPCODE(ip) @ extract opcode from rINST 5563 SET_VREG(r0, r9) @ vAA<- r0 5564 GOTO_OPCODE(ip) @ jump to next instruction 5565 /* 10-13 instructions */ 5566 5567 5568/* ------------------------------ */ 5569 .balign 64 5570.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5571/* File: armv5te/OP_SHL_INT_2ADDR.S */ 5572/* File: armv5te/binop2addr.S */ 5573 /* 5574 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5575 * that specifies an instruction that performs "result = r0 op r1". 5576 * This could be an ARM instruction or a function call. (If the result 5577 * comes back in a register other than r0, you can override "result".) 5578 * 5579 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5580 * vCC (r1). Useful for integer division and modulus. 5581 * 5582 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5583 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5584 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5585 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5586 */ 5587 /* binop/2addr vA, vB */ 5588 mov r9, rINST, lsr #8 @ r9<- A+ 5589 mov r3, rINST, lsr #12 @ r3<- B 5590 and r9, r9, #15 5591 GET_VREG(r1, r3) @ r1<- vB 5592 GET_VREG(r0, r9) @ r0<- vA 5593 .if 0 5594 cmp r1, #0 @ is second operand zero? 
5595 beq common_errDivideByZero 5596 .endif 5597 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5598 5599 and r1, r1, #31 @ optional op; may set condition codes 5600 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5601 GET_INST_OPCODE(ip) @ extract opcode from rINST 5602 SET_VREG(r0, r9) @ vAA<- r0 5603 GOTO_OPCODE(ip) @ jump to next instruction 5604 /* 10-13 instructions */ 5605 5606 5607/* ------------------------------ */ 5608 .balign 64 5609.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5610/* File: armv5te/OP_SHR_INT_2ADDR.S */ 5611/* File: armv5te/binop2addr.S */ 5612 /* 5613 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5614 * that specifies an instruction that performs "result = r0 op r1". 5615 * This could be an ARM instruction or a function call. (If the result 5616 * comes back in a register other than r0, you can override "result".) 5617 * 5618 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5619 * vCC (r1). Useful for integer division and modulus. 5620 * 5621 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5622 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5623 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5624 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5625 */ 5626 /* binop/2addr vA, vB */ 5627 mov r9, rINST, lsr #8 @ r9<- A+ 5628 mov r3, rINST, lsr #12 @ r3<- B 5629 and r9, r9, #15 5630 GET_VREG(r1, r3) @ r1<- vB 5631 GET_VREG(r0, r9) @ r0<- vA 5632 .if 0 5633 cmp r1, #0 @ is second operand zero? 
5634 beq common_errDivideByZero 5635 .endif 5636 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5637 5638 and r1, r1, #31 @ optional op; may set condition codes 5639 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5640 GET_INST_OPCODE(ip) @ extract opcode from rINST 5641 SET_VREG(r0, r9) @ vAA<- r0 5642 GOTO_OPCODE(ip) @ jump to next instruction 5643 /* 10-13 instructions */ 5644 5645 5646/* ------------------------------ */ 5647 .balign 64 5648.L_OP_USHR_INT_2ADDR: /* 0xba */ 5649/* File: armv5te/OP_USHR_INT_2ADDR.S */ 5650/* File: armv5te/binop2addr.S */ 5651 /* 5652 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5653 * that specifies an instruction that performs "result = r0 op r1". 5654 * This could be an ARM instruction or a function call. (If the result 5655 * comes back in a register other than r0, you can override "result".) 5656 * 5657 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5658 * vCC (r1). Useful for integer division and modulus. 5659 * 5660 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5661 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5662 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5663 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5664 */ 5665 /* binop/2addr vA, vB */ 5666 mov r9, rINST, lsr #8 @ r9<- A+ 5667 mov r3, rINST, lsr #12 @ r3<- B 5668 and r9, r9, #15 5669 GET_VREG(r1, r3) @ r1<- vB 5670 GET_VREG(r0, r9) @ r0<- vA 5671 .if 0 5672 cmp r1, #0 @ is second operand zero? 
5673 beq common_errDivideByZero 5674 .endif 5675 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5676 5677 and r1, r1, #31 @ optional op; may set condition codes 5678 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5679 GET_INST_OPCODE(ip) @ extract opcode from rINST 5680 SET_VREG(r0, r9) @ vAA<- r0 5681 GOTO_OPCODE(ip) @ jump to next instruction 5682 /* 10-13 instructions */ 5683 5684 5685/* ------------------------------ */ 5686 .balign 64 5687.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 5688/* File: armv5te/OP_ADD_LONG_2ADDR.S */ 5689/* File: armv5te/binopWide2addr.S */ 5690 /* 5691 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5692 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5693 * This could be an ARM instruction or a function call. (If the result 5694 * comes back in a register other than r0, you can override "result".) 5695 * 5696 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5697 * vCC (r1). Useful for integer division and modulus. 5698 * 5699 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5700 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5701 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5702 * rem-double/2addr 5703 */ 5704 /* binop/2addr vA, vB */ 5705 mov r9, rINST, lsr #8 @ r9<- A+ 5706 mov r1, rINST, lsr #12 @ r1<- B 5707 and r9, r9, #15 5708 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5709 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5710 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5711 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5712 .if 0 5713 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5714 beq common_errDivideByZero 5715 .endif 5716 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5717 5718 adds r0, r0, r2 @ optional op; may set condition codes 5719 adc r1, r1, r3 @ result<- op, r0-r3 changed 5720 GET_INST_OPCODE(ip) @ extract opcode from rINST 5721 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5722 GOTO_OPCODE(ip) @ jump to next instruction 5723 /* 12-15 instructions */ 5724 5725 5726/* ------------------------------ */ 5727 .balign 64 5728.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 5729/* File: armv5te/OP_SUB_LONG_2ADDR.S */ 5730/* File: armv5te/binopWide2addr.S */ 5731 /* 5732 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5733 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5734 * This could be an ARM instruction or a function call. (If the result 5735 * comes back in a register other than r0, you can override "result".) 5736 * 5737 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5738 * vCC (r1). Useful for integer division and modulus. 5739 * 5740 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5741 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5742 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5743 * rem-double/2addr 5744 */ 5745 /* binop/2addr vA, vB */ 5746 mov r9, rINST, lsr #8 @ r9<- A+ 5747 mov r1, rINST, lsr #12 @ r1<- B 5748 and r9, r9, #15 5749 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5750 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5751 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5752 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5753 .if 0 5754 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5755 beq common_errDivideByZero 5756 .endif 5757 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5758 5759 subs r0, r0, r2 @ optional op; may set condition codes 5760 sbc r1, r1, r3 @ result<- op, r0-r3 changed 5761 GET_INST_OPCODE(ip) @ extract opcode from rINST 5762 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5763 GOTO_OPCODE(ip) @ jump to next instruction 5764 /* 12-15 instructions */ 5765 5766 5767/* ------------------------------ */ 5768 .balign 64 5769.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 5770/* File: armv5te/OP_MUL_LONG_2ADDR.S */ 5771 /* 5772 * Signed 64-bit integer multiply, "/2addr" version. 5773 * 5774 * See OP_MUL_LONG for an explanation. 5775 * 5776 * We get a little tight on registers, so to avoid looking up &fp[A] 5777 * again we stuff it into rINST. 5778 */ 5779 /* mul-long/2addr vA, vB */ 5780 mov r9, rINST, lsr #8 @ r9<- A+ 5781 mov r1, rINST, lsr #12 @ r1<- B 5782 and r9, r9, #15 5783 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5784 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 5785 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5786 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 5787 mul ip, r2, r1 @ ip<- ZxW 5788 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 5789 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 5790 mov r0, rINST @ r0<- &fp[A] (free up rINST) 5791 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5792 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 5793 GET_INST_OPCODE(ip) @ extract opcode from rINST 5794 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 5795 GOTO_OPCODE(ip) @ jump to next instruction 5796 5797/* ------------------------------ */ 5798 .balign 64 5799.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 5800/* File: armv5te/OP_DIV_LONG_2ADDR.S */ 5801/* File: armv5te/binopWide2addr.S */ 5802 /* 5803 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5804 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5805 * This could be an ARM instruction or a function call. (If the result 5806 * comes back in a register other than r0, you can override "result".) 
5807 * 5808 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5809 * vCC (r1). Useful for integer division and modulus. 5810 * 5811 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5812 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5813 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5814 * rem-double/2addr 5815 */ 5816 /* binop/2addr vA, vB */ 5817 mov r9, rINST, lsr #8 @ r9<- A+ 5818 mov r1, rINST, lsr #12 @ r1<- B 5819 and r9, r9, #15 5820 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5821 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5822 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5823 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5824 .if 1 5825 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5826 beq common_errDivideByZero 5827 .endif 5828 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5829 5830 @ optional op; may set condition codes 5831 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5832 GET_INST_OPCODE(ip) @ extract opcode from rINST 5833 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5834 GOTO_OPCODE(ip) @ jump to next instruction 5835 /* 12-15 instructions */ 5836 5837 5838/* ------------------------------ */ 5839 .balign 64 5840.L_OP_REM_LONG_2ADDR: /* 0xbf */ 5841/* File: armv5te/OP_REM_LONG_2ADDR.S */ 5842/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 5843/* File: armv5te/binopWide2addr.S */ 5844 /* 5845 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5846 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5847 * This could be an ARM instruction or a function call. (If the result 5848 * comes back in a register other than r0, you can override "result".) 5849 * 5850 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5851 * vCC (r1). Useful for integer division and modulus. 
5852 * 5853 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5854 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5855 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5856 * rem-double/2addr 5857 */ 5858 /* binop/2addr vA, vB */ 5859 mov r9, rINST, lsr #8 @ r9<- A+ 5860 mov r1, rINST, lsr #12 @ r1<- B 5861 and r9, r9, #15 5862 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5863 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5864 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5865 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5866 .if 1 5867 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5868 beq common_errDivideByZero 5869 .endif 5870 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5871 5872 @ optional op; may set condition codes 5873 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5874 GET_INST_OPCODE(ip) @ extract opcode from rINST 5875 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 5876 GOTO_OPCODE(ip) @ jump to next instruction 5877 /* 12-15 instructions */ 5878 5879 5880/* ------------------------------ */ 5881 .balign 64 5882.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 5883/* File: armv5te/OP_AND_LONG_2ADDR.S */ 5884/* File: armv5te/binopWide2addr.S */ 5885 /* 5886 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5887 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5888 * This could be an ARM instruction or a function call. (If the result 5889 * comes back in a register other than r0, you can override "result".) 5890 * 5891 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5892 * vCC (r1). Useful for integer division and modulus. 
5893 * 5894 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5895 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5896 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5897 * rem-double/2addr 5898 */ 5899 /* binop/2addr vA, vB */ 5900 mov r9, rINST, lsr #8 @ r9<- A+ 5901 mov r1, rINST, lsr #12 @ r1<- B 5902 and r9, r9, #15 5903 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5904 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5905 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5906 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5907 .if 0 5908 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5909 beq common_errDivideByZero 5910 .endif 5911 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5912 5913 and r0, r0, r2 @ optional op; may set condition codes 5914 and r1, r1, r3 @ result<- op, r0-r3 changed 5915 GET_INST_OPCODE(ip) @ extract opcode from rINST 5916 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5917 GOTO_OPCODE(ip) @ jump to next instruction 5918 /* 12-15 instructions */ 5919 5920 5921/* ------------------------------ */ 5922 .balign 64 5923.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 5924/* File: armv5te/OP_OR_LONG_2ADDR.S */ 5925/* File: armv5te/binopWide2addr.S */ 5926 /* 5927 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5928 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5929 * This could be an ARM instruction or a function call. (If the result 5930 * comes back in a register other than r0, you can override "result".) 5931 * 5932 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5933 * vCC (r1). Useful for integer division and modulus. 
5934 * 5935 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5936 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5937 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5938 * rem-double/2addr 5939 */ 5940 /* binop/2addr vA, vB */ 5941 mov r9, rINST, lsr #8 @ r9<- A+ 5942 mov r1, rINST, lsr #12 @ r1<- B 5943 and r9, r9, #15 5944 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5945 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5946 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5947 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5948 .if 0 5949 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5950 beq common_errDivideByZero 5951 .endif 5952 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5953 5954 orr r0, r0, r2 @ optional op; may set condition codes 5955 orr r1, r1, r3 @ result<- op, r0-r3 changed 5956 GET_INST_OPCODE(ip) @ extract opcode from rINST 5957 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5958 GOTO_OPCODE(ip) @ jump to next instruction 5959 /* 12-15 instructions */ 5960 5961 5962/* ------------------------------ */ 5963 .balign 64 5964.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 5965/* File: armv5te/OP_XOR_LONG_2ADDR.S */ 5966/* File: armv5te/binopWide2addr.S */ 5967 /* 5968 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5969 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5970 * This could be an ARM instruction or a function call. (If the result 5971 * comes back in a register other than r0, you can override "result".) 5972 * 5973 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5974 * vCC (r1). Useful for integer division and modulus. 
5975 * 5976 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5977 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5978 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5979 * rem-double/2addr 5980 */ 5981 /* binop/2addr vA, vB */ 5982 mov r9, rINST, lsr #8 @ r9<- A+ 5983 mov r1, rINST, lsr #12 @ r1<- B 5984 and r9, r9, #15 5985 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5986 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5987 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5988 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5989 .if 0 5990 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5991 beq common_errDivideByZero 5992 .endif 5993 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5994 5995 eor r0, r0, r2 @ optional op; may set condition codes 5996 eor r1, r1, r3 @ result<- op, r0-r3 changed 5997 GET_INST_OPCODE(ip) @ extract opcode from rINST 5998 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5999 GOTO_OPCODE(ip) @ jump to next instruction 6000 /* 12-15 instructions */ 6001 6002 6003/* ------------------------------ */ 6004 .balign 64 6005.L_OP_SHL_LONG_2ADDR: /* 0xc3 */ 6006/* File: armv5te/OP_SHL_LONG_2ADDR.S */ 6007 /* 6008 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6009 * 32-bit shift distance. 
6010 */ 6011 /* shl-long/2addr vA, vB */ 6012 mov r9, rINST, lsr #8 @ r9<- A+ 6013 mov r3, rINST, lsr #12 @ r3<- B 6014 and r9, r9, #15 6015 GET_VREG(r2, r3) @ r2<- vB 6016 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6017 and r2, r2, #63 @ r2<- r2 & 0x3f 6018 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6019 6020 mov r1, r1, asl r2 @ r1<- r1 << r2 6021 rsb r3, r2, #32 @ r3<- 32 - r2 6022 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 6023 subs ip, r2, #32 @ ip<- r2 - 32 6024 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6025 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 6026 mov r0, r0, asl r2 @ r0<- r0 << r2 6027 b .LOP_SHL_LONG_2ADDR_finish 6028 6029/* ------------------------------ */ 6030 .balign 64 6031.L_OP_SHR_LONG_2ADDR: /* 0xc4 */ 6032/* File: armv5te/OP_SHR_LONG_2ADDR.S */ 6033 /* 6034 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6035 * 32-bit shift distance. 6036 */ 6037 /* shr-long/2addr vA, vB */ 6038 mov r9, rINST, lsr #8 @ r9<- A+ 6039 mov r3, rINST, lsr #12 @ r3<- B 6040 and r9, r9, #15 6041 GET_VREG(r2, r3) @ r2<- vB 6042 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6043 and r2, r2, #63 @ r2<- r2 & 0x3f 6044 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6045 6046 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6047 rsb r3, r2, #32 @ r3<- 32 - r2 6048 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6049 subs ip, r2, #32 @ ip<- r2 - 32 6050 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6051 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 6052 mov r1, r1, asr r2 @ r1<- r1 >> r2 6053 b .LOP_SHR_LONG_2ADDR_finish 6054 6055/* ------------------------------ */ 6056 .balign 64 6057.L_OP_USHR_LONG_2ADDR: /* 0xc5 */ 6058/* File: armv5te/OP_USHR_LONG_2ADDR.S */ 6059 /* 6060 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6061 * 32-bit shift distance. 
6062 */ 6063 /* ushr-long/2addr vA, vB */ 6064 mov r9, rINST, lsr #8 @ r9<- A+ 6065 mov r3, rINST, lsr #12 @ r3<- B 6066 and r9, r9, #15 6067 GET_VREG(r2, r3) @ r2<- vB 6068 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6069 and r2, r2, #63 @ r2<- r2 & 0x3f 6070 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6071 6072 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6073 rsb r3, r2, #32 @ r3<- 32 - r2 6074 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6075 subs ip, r2, #32 @ ip<- r2 - 32 6076 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6077 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 6078 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 6079 b .LOP_USHR_LONG_2ADDR_finish 6080 6081/* ------------------------------ */ 6082 .balign 64 6083.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */ 6084/* File: arm-vfp/OP_ADD_FLOAT_2ADDR.S */ 6085/* File: arm-vfp/fbinop2addr.S */ 6086 /* 6087 * Generic 32-bit floating point "/2addr" binary operation. Provide 6088 * an "instr" line that specifies an instruction that performs 6089 * "s2 = s0 op s1". 6090 * 6091 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6092 */ 6093 /* binop/2addr vA, vB */ 6094 mov r3, rINST, lsr #12 @ r3<- B 6095 mov r9, rINST, lsr #8 @ r9<- A+ 6096 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6097 and r9, r9, #15 @ r9<- A 6098 flds s1, [r3] @ s1<- vB 6099 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6100 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6101 flds s0, [r9] @ s0<- vA 6102 6103 fadds s2, s0, s1 @ s2<- op 6104 GET_INST_OPCODE(ip) @ extract opcode from rINST 6105 fsts s2, [r9] @ vAA<- s2 6106 GOTO_OPCODE(ip) @ jump to next instruction 6107 6108 6109/* ------------------------------ */ 6110 .balign 64 6111.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */ 6112/* File: arm-vfp/OP_SUB_FLOAT_2ADDR.S */ 6113/* File: arm-vfp/fbinop2addr.S */ 6114 /* 6115 * Generic 32-bit floating point "/2addr" binary operation. Provide 6116 * an "instr" line that specifies an instruction that performs 6117 * "s2 = s0 op s1". 
6118 * 6119 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6120 */ 6121 /* binop/2addr vA, vB */ 6122 mov r3, rINST, lsr #12 @ r3<- B 6123 mov r9, rINST, lsr #8 @ r9<- A+ 6124 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6125 and r9, r9, #15 @ r9<- A 6126 flds s1, [r3] @ s1<- vB 6127 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6128 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6129 flds s0, [r9] @ s0<- vA 6130 6131 fsubs s2, s0, s1 @ s2<- op 6132 GET_INST_OPCODE(ip) @ extract opcode from rINST 6133 fsts s2, [r9] @ vAA<- s2 6134 GOTO_OPCODE(ip) @ jump to next instruction 6135 6136 6137/* ------------------------------ */ 6138 .balign 64 6139.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */ 6140/* File: arm-vfp/OP_MUL_FLOAT_2ADDR.S */ 6141/* File: arm-vfp/fbinop2addr.S */ 6142 /* 6143 * Generic 32-bit floating point "/2addr" binary operation. Provide 6144 * an "instr" line that specifies an instruction that performs 6145 * "s2 = s0 op s1". 6146 * 6147 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6148 */ 6149 /* binop/2addr vA, vB */ 6150 mov r3, rINST, lsr #12 @ r3<- B 6151 mov r9, rINST, lsr #8 @ r9<- A+ 6152 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6153 and r9, r9, #15 @ r9<- A 6154 flds s1, [r3] @ s1<- vB 6155 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6156 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6157 flds s0, [r9] @ s0<- vA 6158 6159 fmuls s2, s0, s1 @ s2<- op 6160 GET_INST_OPCODE(ip) @ extract opcode from rINST 6161 fsts s2, [r9] @ vAA<- s2 6162 GOTO_OPCODE(ip) @ jump to next instruction 6163 6164 6165/* ------------------------------ */ 6166 .balign 64 6167.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */ 6168/* File: arm-vfp/OP_DIV_FLOAT_2ADDR.S */ 6169/* File: arm-vfp/fbinop2addr.S */ 6170 /* 6171 * Generic 32-bit floating point "/2addr" binary operation. Provide 6172 * an "instr" line that specifies an instruction that performs 6173 * "s2 = s0 op s1". 
6174 * 6175 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6176 */ 6177 /* binop/2addr vA, vB */ 6178 mov r3, rINST, lsr #12 @ r3<- B 6179 mov r9, rINST, lsr #8 @ r9<- A+ 6180 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6181 and r9, r9, #15 @ r9<- A 6182 flds s1, [r3] @ s1<- vB 6183 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6184 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6185 flds s0, [r9] @ s0<- vA 6186 6187 fdivs s2, s0, s1 @ s2<- op 6188 GET_INST_OPCODE(ip) @ extract opcode from rINST 6189 fsts s2, [r9] @ vAA<- s2 6190 GOTO_OPCODE(ip) @ jump to next instruction 6191 6192 6193/* ------------------------------ */ 6194 .balign 64 6195.L_OP_REM_FLOAT_2ADDR: /* 0xca */ 6196/* File: armv5te/OP_REM_FLOAT_2ADDR.S */ 6197/* EABI doesn't define a float remainder function, but libm does */ 6198/* File: armv5te/binop2addr.S */ 6199 /* 6200 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6201 * that specifies an instruction that performs "result = r0 op r1". 6202 * This could be an ARM instruction or a function call. (If the result 6203 * comes back in a register other than r0, you can override "result".) 6204 * 6205 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6206 * vCC (r1). Useful for integer division and modulus. 6207 * 6208 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6209 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6210 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6211 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6212 */ 6213 /* binop/2addr vA, vB */ 6214 mov r9, rINST, lsr #8 @ r9<- A+ 6215 mov r3, rINST, lsr #12 @ r3<- B 6216 and r9, r9, #15 6217 GET_VREG(r1, r3) @ r1<- vB 6218 GET_VREG(r0, r9) @ r0<- vA 6219 .if 0 6220 cmp r1, #0 @ is second operand zero? 
6221 beq common_errDivideByZero 6222 .endif 6223 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6224 6225 @ optional op; may set condition codes 6226 bl fmodf @ r0<- op, r0-r3 changed 6227 GET_INST_OPCODE(ip) @ extract opcode from rINST 6228 SET_VREG(r0, r9) @ vAA<- r0 6229 GOTO_OPCODE(ip) @ jump to next instruction 6230 /* 10-13 instructions */ 6231 6232 6233/* ------------------------------ */ 6234 .balign 64 6235.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */ 6236/* File: arm-vfp/OP_ADD_DOUBLE_2ADDR.S */ 6237/* File: arm-vfp/fbinopWide2addr.S */ 6238 /* 6239 * Generic 64-bit floating point "/2addr" binary operation. Provide 6240 * an "instr" line that specifies an instruction that performs 6241 * "d2 = d0 op d1". 6242 * 6243 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6244 * div-double/2addr 6245 */ 6246 /* binop/2addr vA, vB */ 6247 mov r3, rINST, lsr #12 @ r3<- B 6248 mov r9, rINST, lsr #8 @ r9<- A+ 6249 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6250 and r9, r9, #15 @ r9<- A 6251 fldd d1, [r3] @ d1<- vB 6252 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6253 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6254 fldd d0, [r9] @ d0<- vA 6255 6256 faddd d2, d0, d1 @ d2<- op 6257 GET_INST_OPCODE(ip) @ extract opcode from rINST 6258 fstd d2, [r9] @ vAA<- d2 6259 GOTO_OPCODE(ip) @ jump to next instruction 6260 6261 6262/* ------------------------------ */ 6263 .balign 64 6264.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */ 6265/* File: arm-vfp/OP_SUB_DOUBLE_2ADDR.S */ 6266/* File: arm-vfp/fbinopWide2addr.S */ 6267 /* 6268 * Generic 64-bit floating point "/2addr" binary operation. Provide 6269 * an "instr" line that specifies an instruction that performs 6270 * "d2 = d0 op d1". 
6271 * 6272 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6273 * div-double/2addr 6274 */ 6275 /* binop/2addr vA, vB */ 6276 mov r3, rINST, lsr #12 @ r3<- B 6277 mov r9, rINST, lsr #8 @ r9<- A+ 6278 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6279 and r9, r9, #15 @ r9<- A 6280 fldd d1, [r3] @ d1<- vB 6281 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6282 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6283 fldd d0, [r9] @ d0<- vA 6284 6285 fsubd d2, d0, d1 @ d2<- op 6286 GET_INST_OPCODE(ip) @ extract opcode from rINST 6287 fstd d2, [r9] @ vAA<- d2 6288 GOTO_OPCODE(ip) @ jump to next instruction 6289 6290 6291/* ------------------------------ */ 6292 .balign 64 6293.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */ 6294/* File: arm-vfp/OP_MUL_DOUBLE_2ADDR.S */ 6295/* File: arm-vfp/fbinopWide2addr.S */ 6296 /* 6297 * Generic 64-bit floating point "/2addr" binary operation. Provide 6298 * an "instr" line that specifies an instruction that performs 6299 * "d2 = d0 op d1". 6300 * 6301 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6302 * div-double/2addr 6303 */ 6304 /* binop/2addr vA, vB */ 6305 mov r3, rINST, lsr #12 @ r3<- B 6306 mov r9, rINST, lsr #8 @ r9<- A+ 6307 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6308 and r9, r9, #15 @ r9<- A 6309 fldd d1, [r3] @ d1<- vB 6310 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6311 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6312 fldd d0, [r9] @ d0<- vA 6313 6314 fmuld d2, d0, d1 @ d2<- op 6315 GET_INST_OPCODE(ip) @ extract opcode from rINST 6316 fstd d2, [r9] @ vAA<- d2 6317 GOTO_OPCODE(ip) @ jump to next instruction 6318 6319 6320/* ------------------------------ */ 6321 .balign 64 6322.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */ 6323/* File: arm-vfp/OP_DIV_DOUBLE_2ADDR.S */ 6324/* File: arm-vfp/fbinopWide2addr.S */ 6325 /* 6326 * Generic 64-bit floating point "/2addr" binary operation. Provide 6327 * an "instr" line that specifies an instruction that performs 6328 * "d2 = d0 op d1". 
6329 * 6330 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6331 * div-double/2addr 6332 */ 6333 /* binop/2addr vA, vB */ 6334 mov r3, rINST, lsr #12 @ r3<- B 6335 mov r9, rINST, lsr #8 @ r9<- A+ 6336 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6337 and r9, r9, #15 @ r9<- A 6338 fldd d1, [r3] @ d1<- vB 6339 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6340 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6341 fldd d0, [r9] @ d0<- vA 6342 6343 fdivd d2, d0, d1 @ d2<- op 6344 GET_INST_OPCODE(ip) @ extract opcode from rINST 6345 fstd d2, [r9] @ vAA<- d2 6346 GOTO_OPCODE(ip) @ jump to next instruction 6347 6348 6349/* ------------------------------ */ 6350 .balign 64 6351.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */ 6352/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */ 6353/* EABI doesn't define a double remainder function, but libm does */ 6354/* File: armv5te/binopWide2addr.S */ 6355 /* 6356 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6357 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6358 * This could be an ARM instruction or a function call. (If the result 6359 * comes back in a register other than r0, you can override "result".) 6360 * 6361 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6362 * vCC (r1). Useful for integer division and modulus. 6363 * 6364 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6365 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6366 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6367 * rem-double/2addr 6368 */ 6369 /* binop/2addr vA, vB */ 6370 mov r9, rINST, lsr #8 @ r9<- A+ 6371 mov r1, rINST, lsr #12 @ r1<- B 6372 and r9, r9, #15 6373 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6374 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6375 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6376 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6377 .if 0 6378 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6379 beq common_errDivideByZero 6380 .endif 6381 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6382 6383 @ optional op; may set condition codes 6384 bl fmod @ result<- op, r0-r3 changed 6385 GET_INST_OPCODE(ip) @ extract opcode from rINST 6386 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6387 GOTO_OPCODE(ip) @ jump to next instruction 6388 /* 12-15 instructions */ 6389 6390 6391/* ------------------------------ */ 6392 .balign 64 6393.L_OP_ADD_INT_LIT16: /* 0xd0 */ 6394/* File: armv5te/OP_ADD_INT_LIT16.S */ 6395/* File: armv5te/binopLit16.S */ 6396 /* 6397 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6398 * that specifies an instruction that performs "result = r0 op r1". 6399 * This could be an ARM instruction or a function call. (If the result 6400 * comes back in a register other than r0, you can override "result".) 6401 * 6402 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6403 * vCC (r1). Useful for integer division and modulus. 6404 * 6405 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6406 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6407 */ 6408 /* binop/lit16 vA, vB, #+CCCC */ 6409 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6410 mov r2, rINST, lsr #12 @ r2<- B 6411 mov r9, rINST, lsr #8 @ r9<- A+ 6412 GET_VREG(r0, r2) @ r0<- vB 6413 and r9, r9, #15 6414 .if 0 6415 cmp r1, #0 @ is second operand zero? 
6416 beq common_errDivideByZero 6417 .endif 6418 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6419 6420 add r0, r0, r1 @ r0<- op, r0-r3 changed 6421 GET_INST_OPCODE(ip) @ extract opcode from rINST 6422 SET_VREG(r0, r9) @ vAA<- r0 6423 GOTO_OPCODE(ip) @ jump to next instruction 6424 /* 10-13 instructions */ 6425 6426 6427/* ------------------------------ */ 6428 .balign 64 6429.L_OP_RSUB_INT: /* 0xd1 */ 6430/* File: armv5te/OP_RSUB_INT.S */ 6431/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */ 6432/* File: armv5te/binopLit16.S */ 6433 /* 6434 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6435 * that specifies an instruction that performs "result = r0 op r1". 6436 * This could be an ARM instruction or a function call. (If the result 6437 * comes back in a register other than r0, you can override "result".) 6438 * 6439 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6440 * vCC (r1). Useful for integer division and modulus. 6441 * 6442 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6443 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6444 */ 6445 /* binop/lit16 vA, vB, #+CCCC */ 6446 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6447 mov r2, rINST, lsr #12 @ r2<- B 6448 mov r9, rINST, lsr #8 @ r9<- A+ 6449 GET_VREG(r0, r2) @ r0<- vB 6450 and r9, r9, #15 6451 .if 0 6452 cmp r1, #0 @ is second operand zero? 
6453 beq common_errDivideByZero 6454 .endif 6455 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6456 6457 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6458 GET_INST_OPCODE(ip) @ extract opcode from rINST 6459 SET_VREG(r0, r9) @ vAA<- r0 6460 GOTO_OPCODE(ip) @ jump to next instruction 6461 /* 10-13 instructions */ 6462 6463 6464/* ------------------------------ */ 6465 .balign 64 6466.L_OP_MUL_INT_LIT16: /* 0xd2 */ 6467/* File: armv5te/OP_MUL_INT_LIT16.S */ 6468/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6469/* File: armv5te/binopLit16.S */ 6470 /* 6471 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6472 * that specifies an instruction that performs "result = r0 op r1". 6473 * This could be an ARM instruction or a function call. (If the result 6474 * comes back in a register other than r0, you can override "result".) 6475 * 6476 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6477 * vCC (r1). Useful for integer division and modulus. 6478 * 6479 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6480 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6481 */ 6482 /* binop/lit16 vA, vB, #+CCCC */ 6483 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6484 mov r2, rINST, lsr #12 @ r2<- B 6485 mov r9, rINST, lsr #8 @ r9<- A+ 6486 GET_VREG(r0, r2) @ r0<- vB 6487 and r9, r9, #15 6488 .if 0 6489 cmp r1, #0 @ is second operand zero? 6490 beq common_errDivideByZero 6491 .endif 6492 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6493 6494 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6495 GET_INST_OPCODE(ip) @ extract opcode from rINST 6496 SET_VREG(r0, r9) @ vAA<- r0 6497 GOTO_OPCODE(ip) @ jump to next instruction 6498 /* 10-13 instructions */ 6499 6500 6501/* ------------------------------ */ 6502 .balign 64 6503.L_OP_DIV_INT_LIT16: /* 0xd3 */ 6504/* File: armv5te/OP_DIV_INT_LIT16.S */ 6505/* File: armv5te/binopLit16.S */ 6506 /* 6507 * Generic 32-bit "lit16" binary operation. 
Provide an "instr" line 6508 * that specifies an instruction that performs "result = r0 op r1". 6509 * This could be an ARM instruction or a function call. (If the result 6510 * comes back in a register other than r0, you can override "result".) 6511 * 6512 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6513 * vCC (r1). Useful for integer division and modulus. 6514 * 6515 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6516 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6517 */ 6518 /* binop/lit16 vA, vB, #+CCCC */ 6519 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6520 mov r2, rINST, lsr #12 @ r2<- B 6521 mov r9, rINST, lsr #8 @ r9<- A+ 6522 GET_VREG(r0, r2) @ r0<- vB 6523 and r9, r9, #15 6524 .if 1 6525 cmp r1, #0 @ is second operand zero? 6526 beq common_errDivideByZero 6527 .endif 6528 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6529 6530 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6531 GET_INST_OPCODE(ip) @ extract opcode from rINST 6532 SET_VREG(r0, r9) @ vAA<- r0 6533 GOTO_OPCODE(ip) @ jump to next instruction 6534 /* 10-13 instructions */ 6535 6536 6537/* ------------------------------ */ 6538 .balign 64 6539.L_OP_REM_INT_LIT16: /* 0xd4 */ 6540/* File: armv5te/OP_REM_INT_LIT16.S */ 6541/* idivmod returns quotient in r0 and remainder in r1 */ 6542/* File: armv5te/binopLit16.S */ 6543 /* 6544 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6545 * that specifies an instruction that performs "result = r0 op r1". 6546 * This could be an ARM instruction or a function call. (If the result 6547 * comes back in a register other than r0, you can override "result".) 6548 * 6549 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6550 * vCC (r1). Useful for integer division and modulus. 
6551 * 6552 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6553 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6554 */ 6555 /* binop/lit16 vA, vB, #+CCCC */ 6556 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6557 mov r2, rINST, lsr #12 @ r2<- B 6558 mov r9, rINST, lsr #8 @ r9<- A+ 6559 GET_VREG(r0, r2) @ r0<- vB 6560 and r9, r9, #15 6561 .if 1 6562 cmp r1, #0 @ is second operand zero? 6563 beq common_errDivideByZero 6564 .endif 6565 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6566 6567 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6568 GET_INST_OPCODE(ip) @ extract opcode from rINST 6569 SET_VREG(r1, r9) @ vAA<- r1 6570 GOTO_OPCODE(ip) @ jump to next instruction 6571 /* 10-13 instructions */ 6572 6573 6574/* ------------------------------ */ 6575 .balign 64 6576.L_OP_AND_INT_LIT16: /* 0xd5 */ 6577/* File: armv5te/OP_AND_INT_LIT16.S */ 6578/* File: armv5te/binopLit16.S */ 6579 /* 6580 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6581 * that specifies an instruction that performs "result = r0 op r1". 6582 * This could be an ARM instruction or a function call. (If the result 6583 * comes back in a register other than r0, you can override "result".) 6584 * 6585 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6586 * vCC (r1). Useful for integer division and modulus. 6587 * 6588 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6589 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6590 */ 6591 /* binop/lit16 vA, vB, #+CCCC */ 6592 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6593 mov r2, rINST, lsr #12 @ r2<- B 6594 mov r9, rINST, lsr #8 @ r9<- A+ 6595 GET_VREG(r0, r2) @ r0<- vB 6596 and r9, r9, #15 6597 .if 0 6598 cmp r1, #0 @ is second operand zero? 
6599 beq common_errDivideByZero 6600 .endif 6601 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6602 6603 and r0, r0, r1 @ r0<- op, r0-r3 changed 6604 GET_INST_OPCODE(ip) @ extract opcode from rINST 6605 SET_VREG(r0, r9) @ vAA<- r0 6606 GOTO_OPCODE(ip) @ jump to next instruction 6607 /* 10-13 instructions */ 6608 6609 6610/* ------------------------------ */ 6611 .balign 64 6612.L_OP_OR_INT_LIT16: /* 0xd6 */ 6613/* File: armv5te/OP_OR_INT_LIT16.S */ 6614/* File: armv5te/binopLit16.S */ 6615 /* 6616 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6617 * that specifies an instruction that performs "result = r0 op r1". 6618 * This could be an ARM instruction or a function call. (If the result 6619 * comes back in a register other than r0, you can override "result".) 6620 * 6621 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6622 * vCC (r1). Useful for integer division and modulus. 6623 * 6624 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6625 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6626 */ 6627 /* binop/lit16 vA, vB, #+CCCC */ 6628 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6629 mov r2, rINST, lsr #12 @ r2<- B 6630 mov r9, rINST, lsr #8 @ r9<- A+ 6631 GET_VREG(r0, r2) @ r0<- vB 6632 and r9, r9, #15 6633 .if 0 6634 cmp r1, #0 @ is second operand zero? 6635 beq common_errDivideByZero 6636 .endif 6637 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6638 6639 orr r0, r0, r1 @ r0<- op, r0-r3 changed 6640 GET_INST_OPCODE(ip) @ extract opcode from rINST 6641 SET_VREG(r0, r9) @ vAA<- r0 6642 GOTO_OPCODE(ip) @ jump to next instruction 6643 /* 10-13 instructions */ 6644 6645 6646/* ------------------------------ */ 6647 .balign 64 6648.L_OP_XOR_INT_LIT16: /* 0xd7 */ 6649/* File: armv5te/OP_XOR_INT_LIT16.S */ 6650/* File: armv5te/binopLit16.S */ 6651 /* 6652 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6653 * that specifies an instruction that performs "result = r0 op r1". 
6654 * This could be an ARM instruction or a function call. (If the result 6655 * comes back in a register other than r0, you can override "result".) 6656 * 6657 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6658 * vCC (r1). Useful for integer division and modulus. 6659 * 6660 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6661 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6662 */ 6663 /* binop/lit16 vA, vB, #+CCCC */ 6664 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6665 mov r2, rINST, lsr #12 @ r2<- B 6666 mov r9, rINST, lsr #8 @ r9<- A+ 6667 GET_VREG(r0, r2) @ r0<- vB 6668 and r9, r9, #15 6669 .if 0 6670 cmp r1, #0 @ is second operand zero? 6671 beq common_errDivideByZero 6672 .endif 6673 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6674 6675 eor r0, r0, r1 @ r0<- op, r0-r3 changed 6676 GET_INST_OPCODE(ip) @ extract opcode from rINST 6677 SET_VREG(r0, r9) @ vAA<- r0 6678 GOTO_OPCODE(ip) @ jump to next instruction 6679 /* 10-13 instructions */ 6680 6681 6682/* ------------------------------ */ 6683 .balign 64 6684.L_OP_ADD_INT_LIT8: /* 0xd8 */ 6685/* File: armv5te/OP_ADD_INT_LIT8.S */ 6686/* File: armv5te/binopLit8.S */ 6687 /* 6688 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6689 * that specifies an instruction that performs "result = r0 op r1". 6690 * This could be an ARM instruction or a function call. (If the result 6691 * comes back in a register other than r0, you can override "result".) 6692 * 6693 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6694 * vCC (r1). Useful for integer division and modulus. 
6695 * 6696 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6697 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6698 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6699 */ 6700 /* binop/lit8 vAA, vBB, #+CC */ 6701 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6702 mov r9, rINST, lsr #8 @ r9<- AA 6703 and r2, r3, #255 @ r2<- BB 6704 GET_VREG(r0, r2) @ r0<- vBB 6705 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6706 .if 0 6707 @cmp r1, #0 @ is second operand zero? 6708 beq common_errDivideByZero 6709 .endif 6710 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6711 6712 @ optional op; may set condition codes 6713 add r0, r0, r1 @ r0<- op, r0-r3 changed 6714 GET_INST_OPCODE(ip) @ extract opcode from rINST 6715 SET_VREG(r0, r9) @ vAA<- r0 6716 GOTO_OPCODE(ip) @ jump to next instruction 6717 /* 10-12 instructions */ 6718 6719 6720/* ------------------------------ */ 6721 .balign 64 6722.L_OP_RSUB_INT_LIT8: /* 0xd9 */ 6723/* File: armv5te/OP_RSUB_INT_LIT8.S */ 6724/* File: armv5te/binopLit8.S */ 6725 /* 6726 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6727 * that specifies an instruction that performs "result = r0 op r1". 6728 * This could be an ARM instruction or a function call. (If the result 6729 * comes back in a register other than r0, you can override "result".) 6730 * 6731 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6732 * vCC (r1). Useful for integer division and modulus. 6733 * 6734 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6735 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6736 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6737 */ 6738 /* binop/lit8 vAA, vBB, #+CC */ 6739 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6740 mov r9, rINST, lsr #8 @ r9<- AA 6741 and r2, r3, #255 @ r2<- BB 6742 GET_VREG(r0, r2) @ r0<- vBB 6743 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6744 .if 0 6745 @cmp r1, #0 @ is second operand zero? 
6746 beq common_errDivideByZero 6747 .endif 6748 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6749 6750 @ optional op; may set condition codes 6751 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6752 GET_INST_OPCODE(ip) @ extract opcode from rINST 6753 SET_VREG(r0, r9) @ vAA<- r0 6754 GOTO_OPCODE(ip) @ jump to next instruction 6755 /* 10-12 instructions */ 6756 6757 6758/* ------------------------------ */ 6759 .balign 64 6760.L_OP_MUL_INT_LIT8: /* 0xda */ 6761/* File: armv5te/OP_MUL_INT_LIT8.S */ 6762/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6763/* File: armv5te/binopLit8.S */ 6764 /* 6765 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6766 * that specifies an instruction that performs "result = r0 op r1". 6767 * This could be an ARM instruction or a function call. (If the result 6768 * comes back in a register other than r0, you can override "result".) 6769 * 6770 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6771 * vCC (r1). Useful for integer division and modulus. 6772 * 6773 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6774 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6775 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6776 */ 6777 /* binop/lit8 vAA, vBB, #+CC */ 6778 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6779 mov r9, rINST, lsr #8 @ r9<- AA 6780 and r2, r3, #255 @ r2<- BB 6781 GET_VREG(r0, r2) @ r0<- vBB 6782 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6783 .if 0 6784 @cmp r1, #0 @ is second operand zero? 
6785 beq common_errDivideByZero 6786 .endif 6787 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6788 6789 @ optional op; may set condition codes 6790 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6791 GET_INST_OPCODE(ip) @ extract opcode from rINST 6792 SET_VREG(r0, r9) @ vAA<- r0 6793 GOTO_OPCODE(ip) @ jump to next instruction 6794 /* 10-12 instructions */ 6795 6796 6797/* ------------------------------ */ 6798 .balign 64 6799.L_OP_DIV_INT_LIT8: /* 0xdb */ 6800/* File: armv5te/OP_DIV_INT_LIT8.S */ 6801/* File: armv5te/binopLit8.S */ 6802 /* 6803 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6804 * that specifies an instruction that performs "result = r0 op r1". 6805 * This could be an ARM instruction or a function call. (If the result 6806 * comes back in a register other than r0, you can override "result".) 6807 * 6808 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6809 * vCC (r1). Useful for integer division and modulus. 6810 * 6811 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6812 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6813 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6814 */ 6815 /* binop/lit8 vAA, vBB, #+CC */ 6816 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6817 mov r9, rINST, lsr #8 @ r9<- AA 6818 and r2, r3, #255 @ r2<- BB 6819 GET_VREG(r0, r2) @ r0<- vBB 6820 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6821 .if 1 6822 @cmp r1, #0 @ is second operand zero? 
6823 beq common_errDivideByZero 6824 .endif 6825 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6826 6827 @ optional op; may set condition codes 6828 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6829 GET_INST_OPCODE(ip) @ extract opcode from rINST 6830 SET_VREG(r0, r9) @ vAA<- r0 6831 GOTO_OPCODE(ip) @ jump to next instruction 6832 /* 10-12 instructions */ 6833 6834 6835/* ------------------------------ */ 6836 .balign 64 6837.L_OP_REM_INT_LIT8: /* 0xdc */ 6838/* File: armv5te/OP_REM_INT_LIT8.S */ 6839/* idivmod returns quotient in r0 and remainder in r1 */ 6840/* File: armv5te/binopLit8.S */ 6841 /* 6842 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6843 * that specifies an instruction that performs "result = r0 op r1". 6844 * This could be an ARM instruction or a function call. (If the result 6845 * comes back in a register other than r0, you can override "result".) 6846 * 6847 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6848 * vCC (r1). Useful for integer division and modulus. 6849 * 6850 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6851 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6852 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6853 */ 6854 /* binop/lit8 vAA, vBB, #+CC */ 6855 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6856 mov r9, rINST, lsr #8 @ r9<- AA 6857 and r2, r3, #255 @ r2<- BB 6858 GET_VREG(r0, r2) @ r0<- vBB 6859 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6860 .if 1 6861 @cmp r1, #0 @ is second operand zero? 
6862 beq common_errDivideByZero 6863 .endif 6864 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6865 6866 @ optional op; may set condition codes 6867 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6868 GET_INST_OPCODE(ip) @ extract opcode from rINST 6869 SET_VREG(r1, r9) @ vAA<- r1 6870 GOTO_OPCODE(ip) @ jump to next instruction 6871 /* 10-12 instructions */ 6872 6873 6874/* ------------------------------ */ 6875 .balign 64 6876.L_OP_AND_INT_LIT8: /* 0xdd */ 6877/* File: armv5te/OP_AND_INT_LIT8.S */ 6878/* File: armv5te/binopLit8.S */ 6879 /* 6880 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6881 * that specifies an instruction that performs "result = r0 op r1". 6882 * This could be an ARM instruction or a function call. (If the result 6883 * comes back in a register other than r0, you can override "result".) 6884 * 6885 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6886 * vCC (r1). Useful for integer division and modulus. 6887 * 6888 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6889 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6890 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6891 */ 6892 /* binop/lit8 vAA, vBB, #+CC */ 6893 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6894 mov r9, rINST, lsr #8 @ r9<- AA 6895 and r2, r3, #255 @ r2<- BB 6896 GET_VREG(r0, r2) @ r0<- vBB 6897 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6898 .if 0 6899 @cmp r1, #0 @ is second operand zero? 
6900 beq common_errDivideByZero 6901 .endif 6902 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6903 6904 @ optional op; may set condition codes 6905 and r0, r0, r1 @ r0<- op, r0-r3 changed 6906 GET_INST_OPCODE(ip) @ extract opcode from rINST 6907 SET_VREG(r0, r9) @ vAA<- r0 6908 GOTO_OPCODE(ip) @ jump to next instruction 6909 /* 10-12 instructions */ 6910 6911 6912/* ------------------------------ */ 6913 .balign 64 6914.L_OP_OR_INT_LIT8: /* 0xde */ 6915/* File: armv5te/OP_OR_INT_LIT8.S */ 6916/* File: armv5te/binopLit8.S */ 6917 /* 6918 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6919 * that specifies an instruction that performs "result = r0 op r1". 6920 * This could be an ARM instruction or a function call. (If the result 6921 * comes back in a register other than r0, you can override "result".) 6922 * 6923 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6924 * vCC (r1). Useful for integer division and modulus. 6925 * 6926 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6927 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6928 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6929 */ 6930 /* binop/lit8 vAA, vBB, #+CC */ 6931 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6932 mov r9, rINST, lsr #8 @ r9<- AA 6933 and r2, r3, #255 @ r2<- BB 6934 GET_VREG(r0, r2) @ r0<- vBB 6935 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6936 .if 0 6937 @cmp r1, #0 @ is second operand zero? 
6938 beq common_errDivideByZero 6939 .endif 6940 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6941 6942 @ optional op; may set condition codes 6943 orr r0, r0, r1 @ r0<- op, r0-r3 changed 6944 GET_INST_OPCODE(ip) @ extract opcode from rINST 6945 SET_VREG(r0, r9) @ vAA<- r0 6946 GOTO_OPCODE(ip) @ jump to next instruction 6947 /* 10-12 instructions */ 6948 6949 6950/* ------------------------------ */ 6951 .balign 64 6952.L_OP_XOR_INT_LIT8: /* 0xdf */ 6953/* File: armv5te/OP_XOR_INT_LIT8.S */ 6954/* File: armv5te/binopLit8.S */ 6955 /* 6956 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6957 * that specifies an instruction that performs "result = r0 op r1". 6958 * This could be an ARM instruction or a function call. (If the result 6959 * comes back in a register other than r0, you can override "result".) 6960 * 6961 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6962 * vCC (r1). Useful for integer division and modulus. 6963 * 6964 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6965 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6966 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6967 */ 6968 /* binop/lit8 vAA, vBB, #+CC */ 6969 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6970 mov r9, rINST, lsr #8 @ r9<- AA 6971 and r2, r3, #255 @ r2<- BB 6972 GET_VREG(r0, r2) @ r0<- vBB 6973 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6974 .if 0 6975 @cmp r1, #0 @ is second operand zero? 
6976 beq common_errDivideByZero 6977 .endif 6978 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6979 6980 @ optional op; may set condition codes 6981 eor r0, r0, r1 @ r0<- op, r0-r3 changed 6982 GET_INST_OPCODE(ip) @ extract opcode from rINST 6983 SET_VREG(r0, r9) @ vAA<- r0 6984 GOTO_OPCODE(ip) @ jump to next instruction 6985 /* 10-12 instructions */ 6986 6987 6988/* ------------------------------ */ 6989 .balign 64 6990.L_OP_SHL_INT_LIT8: /* 0xe0 */ 6991/* File: armv5te/OP_SHL_INT_LIT8.S */ 6992/* File: armv5te/binopLit8.S */ 6993 /* 6994 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6995 * that specifies an instruction that performs "result = r0 op r1". 6996 * This could be an ARM instruction or a function call. (If the result 6997 * comes back in a register other than r0, you can override "result".) 6998 * 6999 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7000 * vCC (r1). Useful for integer division and modulus. 7001 * 7002 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7003 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7004 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7005 */ 7006 /* binop/lit8 vAA, vBB, #+CC */ 7007 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7008 mov r9, rINST, lsr #8 @ r9<- AA 7009 and r2, r3, #255 @ r2<- BB 7010 GET_VREG(r0, r2) @ r0<- vBB 7011 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7012 .if 0 7013 @cmp r1, #0 @ is second operand zero? 
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_LIT8: /* 0xe1 */
/* File: armv5te/OP_SHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    @ chkzero==0 for shifts, so this divide-by-zero check is assembled out
    .if 0
    @cmp     r1, #0                     @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_LIT8: /* 0xe2 */
/* File: armv5te/OP_USHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    @ chkzero==0 for shifts, so this divide-by-zero check is assembled out
    .if 0
    @cmp     r1, #0                     @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_IGET_VOLATILE: /* 0xe3 */
/* File: armv5te/OP_IGET_VOLATILE.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_VOLATILE_finish   @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution failed?
    bne     .LOP_IGET_VOLATILE_finish   @ no, continue in sister section
    b       common_exceptionThrown      @ yes, exception pending


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_VOLATILE: /* 0xe4 */
/* File: armv5te/OP_IPUT_VOLATILE.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_VOLATILE_finish   @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_VOLATILE_finish   @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET_VOLATILE: /* 0xe5 */
/* File: armv5te/OP_SGET_VOLATILE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_VOLATILE_resolve  @ yes, do resolve
.LOP_SGET_VOLATILE_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    SMP_DMB                             @ acquiring load (volatile semantics)
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_VOLATILE: /* 0xe6 */
/* File: armv5te/OP_SPUT_VOLATILE.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_VOLATILE_resolve  @ yes, do resolve
.LOP_SPUT_VOLATILE_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store (volatile semantics)
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
/* File: armv5te/OP_IGET_OBJECT_VOLATILE.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_VOLATILE_finish @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution failed?
    bne     .LOP_IGET_OBJECT_VOLATILE_finish @ no, continue in sister section
    b       common_exceptionThrown      @ yes, exception pending


/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
/* File: armv5te/OP_IGET_WIDE_VOLATILE.S */
/* File: armv5te/OP_IGET_WIDE.S */
    /*
     * Wide (64-bit) instance field get.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_VOLATILE_finish  @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution failed?
    bne     .LOP_IGET_WIDE_VOLATILE_finish @ no, continue in sister section
    b       common_exceptionThrown      @ yes, exception pending


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
/* File: armv5te/OP_IPUT_WIDE_VOLATILE.S */
/* File: armv5te/OP_IPUT_WIDE.S */
    /* iput-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_WIDE_VOLATILE_finish  @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_WIDE_VOLATILE_finish  @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET_WIDE_VOLATILE: /* 0xea */
/* File: armv5te/OP_SGET_WIDE_VOLATILE.S */
/* File: armv5te/OP_SGET_WIDE.S */
    /*
     * 64-bit SGET handler.
     */
    /* sget-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_WIDE_VOLATILE_resolve @ yes, do resolve
.LOP_SGET_WIDE_VOLATILE_finish:
    mov     r9, rINST, lsr #8           @ r9<- AA
    @ volatile variant: 64-bit read must be atomic, so go through the helper
    .if 1
    add     r0, r0, #offStaticField_value @ r0<- pointer to data
    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
    .else
    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
    .endif
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
/* File: armv5te/OP_SPUT_WIDE_VOLATILE.S */
/* File: armv5te/OP_SPUT_WIDE.S */
    /*
     * 64-bit SPUT handler.
     */
    /* sput-wide vAA, field@BBBB */
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    cmp     r2, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_WIDE_VOLATILE_resolve @ yes, do resolve
.LOP_SPUT_WIDE_VOLATILE_finish: @ field ptr in r2, AA in r9
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    GET_INST_OPCODE(r10)                @ opcode in r10: must survive the bl below
    @ volatile variant: 64-bit store must be atomic, so go through the helper
    .if 1
    add     r2, r2, #offStaticField_value @ r2<- pointer to data
    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
    .else
    strd    r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1
    .endif
    GOTO_OPCODE(r10)                    @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_BREAKPOINT: /* 0xec */
/* File: armv5te/OP_BREAKPOINT.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */
    /*
     * Handle a throw-verification-error instruction.  This throws an
     * exception for an error discovered during verification.  The
     * exception is indicated by AA, with some detail provided by BBBB.
     */
    /* op AA, ref@BBBB */
    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
    FETCH(r2, 1)                        @ r2<- BBBB
    EXPORT_PC()                         @ export the PC
    mov     r1, rINST, lsr #8           @ r1<- AA
    bl      dvmThrowVerificationError   @ always throws
    b       common_exceptionThrown      @ handle exception

/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE: /* 0xee */
/* File: armv5te/OP_EXECUTE_INLINE.S */
    /*
     * Execute a "native inline" instruction.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
     */
    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
    FETCH(r10, 1)                       @ r10<- BBBB
    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
    EXPORT_PC()                         @ can throw
    sub     sp, sp, #8                  @ make room for arg, +64 bit align
    mov     r0, rINST, lsr #12          @ r0<- B
    str     r1, [sp]                    @ push &glue->retval
    bl      .LOP_EXECUTE_INLINE_continue @ make call; will return after
    add     sp, sp, #8                  @ pop stack
    cmp     r0, #0                      @ test boolean result of inline
    beq     common_exceptionThrown      @ returned false, handle exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */
    /*
     * Execute a "native inline" instruction, using "/range" semantics.
     * Same idea as execute-inline, but we get the args differently.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
     */
    /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
    FETCH(r10, 1)                       @ r10<- BBBB
    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
    EXPORT_PC()                         @ can throw
    sub     sp, sp, #8                  @ make room for arg, +64 bit align
    mov     r0, rINST, lsr #8           @ r0<- AA
    str     r1, [sp]                    @ push &glue->retval
    bl      .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
    add     sp, sp, #8                  @ pop stack
    cmp     r0, #0                      @ test boolean result of inline
    beq     common_exceptionThrown      @ returned false, handle exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
    /*
     * invoke-direct-empty is a no-op in a "standard" interpreter.
     */
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_F1: /* 0xf1 */
/* File: armv5te/OP_UNUSED_F1.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_IGET_QUICK: /* 0xf2 */
/* File: armv5te/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
/* File: armv5te/OP_IGET_WIDE_QUICK.S */
    /* iget-wide-quick vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(ip, 1)                        @ ip<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldrd    r0, [r3, ip]                @ r0<- obj.field (64 bits, aligned)
    and     r2, r2, #15                 @ r2<- A
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
/* File: armv5te/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_QUICK: /* 0xf5 */
/* File: armv5te/OP_IPUT_QUICK.S */
    /* For: iput-quick, iput-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    and     r2, r2, #15                 @ r2<- A
    GET_VREG(r0, r2)                    @ r0<- fp[A]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
/* File: armv5te/OP_IPUT_WIDE_QUICK.S */
    /* iput-wide-quick vA, vB, offset@CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A(+)
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r2, r1)                    @ r2<- fp[B], the object pointer
    add     r3, rFP, r0, lsl #2         @ r3<- &fp[A]
    cmp     r2, #0                      @ check object for null
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH(r3, 1)                        @ r3<- field byte offset
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    strd    r0, [r2, r3]                @ obj.field (64 bits, aligned)<- r0/r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */
/* File: armv5te/OP_IPUT_QUICK.S */
    /* For: iput-quick, iput-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    and     r2, r2, #15                 @ r2<- A
    GET_VREG(r0, r2)                    @ r0<- fp[A]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!0)
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB] (the Method*)
    bl      common_invokeMethodNoRange  @ continue on

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!1)
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB] (the Method*)
    bl      common_invokeMethodRange    @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
     *
     * for: [opt] invoke-super-quick, invoke-super-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    ldr     r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
    GET_VREG(r3, r10)                   @ r3<- "this"
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
    cmp     r3, #0                      @ null "this" ref?
    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
    beq     common_errNullObject        @ "this" is null, throw exception
    bl      common_invokeMethodNoRange  @ continue on

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
     *
     * for: [opt] invoke-super-quick, invoke-super-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    ldr     r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
    GET_VREG(r3, r10)                   @ r3<- "this"
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
    cmp     r3, #0                      @ null "this" ref?
    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
    beq     common_errNullObject        @ "this" is null, throw exception
    bl      common_invokeMethodRange    @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
/* File: armv5te/OP_IPUT_OBJECT_VOLATILE.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_OBJECT_VOLATILE_finish @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_OBJECT_VOLATILE_finish @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
/* File: armv5te/OP_SGET_OBJECT_VOLATILE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_OBJECT_VOLATILE_resolve @ yes, do resolve
.LOP_SGET_OBJECT_VOLATILE_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    SMP_DMB                             @ acquiring load (volatile semantics)
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
/* File: armv5te/OP_SPUT_OBJECT_VOLATILE.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_OBJECT_VOLATILE_resolve @ yes, do resolve
.LOP_SPUT_OBJECT_VOLATILE_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store (volatile semantics)
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FF: /* 0xff */
/* File: armv5te/OP_UNUSED_FF.S */
/* File: armv5te/unused.S */
    bl      common_abort



    .balign 64
    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
    .global dvmAsmInstructionEnd
dvmAsmInstructionEnd:

/*
 * ===========================================================================
 *  Sister implementations
 * ===========================================================================
 */
    .global dvmAsmSisterStart
    .type   dvmAsmSisterStart, %function
    .text
    .balign 4
dvmAsmSisterStart:

/* continuation for OP_CONST_STRING */

    /*
     * Continuation if the String has not yet been resolved.
     *  r1: BBBB (String ref)
     *  r9: target register
     */
.LOP_CONST_STRING_resolve:
    EXPORT_PC()
    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveString            @ r0<- String reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CONST_STRING_JUMBO */

    /*
     * Continuation if the String has not yet been resolved.
     *  r1: BBBBBBBB (String ref)
     *  r9: target register
     */
.LOP_CONST_STRING_JUMBO_resolve:
    EXPORT_PC()
    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveString            @ r0<- String reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CONST_CLASS */

    /*
     * Continuation if the Class has not yet been resolved.
     *  r1: BBBB (Class ref)
     *  r9: target register
     */
.LOP_CONST_CLASS_resolve:
    EXPORT_PC()
    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
    mov     r2, #1                      @ r2<- true
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- Class reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CHECK_CAST */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    cmp     r0, #0                      @ failed?
    bne     .LOP_CHECK_CAST_okay        @ no, success

    @ A cast has failed.  We need to throw a ClassCastException with the
    @ class of the object that failed to be cast.
    EXPORT_PC()                         @ about to throw
    ldr     r3, [r9, #offObject_clazz]  @ r3<- obj->clazz
    ldr     r0, .LstrClassCastExceptionPtr
    ldr     r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor
    bl      dvmThrowExceptionWithClassMessage
    b       common_exceptionThrown

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r2 holds BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r1, r2                      @ r1<- BBBB
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBB
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_CHECK_CAST_resolved    @ pick up where we left off

.LstrClassCastExceptionPtr:
    .word   .LstrClassCastException

/* continuation for OP_INSTANCE_OF */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    @ fall through to OP_INSTANCE_OF_store

    /*
     * r0 holds boolean result
     * r9 holds A
     */
.LOP_INSTANCE_OF_store:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Trivial test succeeded, save and bail.
     *  r9 holds A
     */
.LOP_INSTANCE_OF_trivial:
    mov     r0, #1                      @ indicate success
    @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r3 holds BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    mov     r1, r3                      @ r1<- BBBB
    mov     r2, #1                      @ r2<- true
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBB
    mov     r3, rINST, lsr #12          @ r3<- B
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_INSTANCE_OF_resolved   @ pick up where we left off

/* continuation for OP_NEW_INSTANCE */

    .balign 32                          @ minimize cache lines
.LOP_NEW_INSTANCE_finish: @ r0=new object
    mov     r3, rINST, lsr #8           @ r3<- AA
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Class initialization required.
     *
     *  r0 holds class object
     */
.LOP_NEW_INSTANCE_needinit:
    mov     r9, r0                      @ save r0
    bl      dvmInitClass                @ initialize class
    cmp     r0, #0                      @ check boolean result
    mov     r0, r9                      @ restore r0
    bne     .LOP_NEW_INSTANCE_initialized @ success, continue
    b       common_exceptionThrown      @ failed, deal with init exception

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r1 holds BBBB
     */
.LOP_NEW_INSTANCE_resolve:
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    bne     .LOP_NEW_INSTANCE_resolved  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

.LstrInstantiationErrorPtr:
    .word   .LstrInstantiationError

/* continuation for OP_NEW_ARRAY */


    /*
     * Resolve class.  (This is an uncommon case.)
     *
     *  r1 holds array length
     *  r2 holds class ref CCCC
     */
.LOP_NEW_ARRAY_resolve:
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r9, r1                      @ r9<- length (save)
    mov     r1, r2                      @ r1<- CCCC
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    mov     r1, r9                      @ r1<- length (restore)
    beq     common_exceptionThrown      @ yes, handle exception
    @ fall through to OP_NEW_ARRAY_finish

    /*
     * Finish allocation.
     *
     *  r0 holds class
     *  r1 holds array length
     */
.LOP_NEW_ARRAY_finish:
    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
    cmp     r0, #0                      @ failed?
    mov     r2, rINST, lsr #8           @ r2<- A+
    beq     common_exceptionThrown      @ yes, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_FILLED_NEW_ARRAY */

    /*
     * On entry:
     *  r0 holds array class
     *  r10 holds AA or BA
     */
.LOP_FILLED_NEW_ARRAY_continue:
    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
    ldrb    r3, [r3, #1]                @ r3<- descriptor[1]
    .if     0
    mov     r1, r10                     @ r1<- AA (length)
    .else
    mov     r1, r10, lsr #4             @ r1<- B (length)
    .endif
    cmp     r3, #'I'                    @ array of ints?
    cmpne   r3, #'L'                    @ array of objects?
    cmpne   r3, #'['                    @ array of arrays?
    mov     r9, r1                      @ save length in r9
    bne     .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet
    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
    cmp     r0, #0                      @ null return?
    beq     common_exceptionThrown      @ alloc failed, handle exception

    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
    str     r0, [rGLUE, #offGlue_retval] @ retval.l <- new array
    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
    subs    r9, r9, #1                  @ length--, check for neg
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    bmi     2f                          @ was zero, bail

    @ copy values from registers into the array
    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
    .if     0
    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
1:  ldr     r3, [r2], #4                @ r3<- *r2++
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .else
    cmp     r9, #4                      @ length was initially 5?
8066 and r2, r10, #15 @ r2<- A 8067 bne 1f @ <= 4 args, branch 8068 GET_VREG(r3, r2) @ r3<- vA 8069 sub r9, r9, #1 @ count-- 8070 str r3, [r0, #16] @ contents[4] = vA 80711: and r2, r1, #15 @ r2<- F/E/D/C 8072 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8073 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8074 subs r9, r9, #1 @ count-- 8075 str r3, [r0], #4 @ *contents++ = vX 8076 bpl 1b 8077 @ continue at 2 8078 .endif 8079 80802: 8081 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8082 GOTO_OPCODE(ip) @ execute it 8083 8084 /* 8085 * Throw an exception indicating that we have not implemented this 8086 * mode of filled-new-array. 8087 */ 8088.LOP_FILLED_NEW_ARRAY_notimpl: 8089 ldr r0, .L_strInternalError 8090 ldr r1, .L_strFilledNewArrayNotImpl 8091 bl dvmThrowException 8092 b common_exceptionThrown 8093 8094 .if (!0) @ define in one or the other, not both 8095.L_strFilledNewArrayNotImpl: 8096 .word .LstrFilledNewArrayNotImpl 8097.L_strInternalError: 8098 .word .LstrInternalError 8099 .endif 8100 8101/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8102 8103 /* 8104 * On entry: 8105 * r0 holds array class 8106 * r10 holds AA or BA 8107 */ 8108.LOP_FILLED_NEW_ARRAY_RANGE_continue: 8109 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8110 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8111 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8112 .if 1 8113 mov r1, r10 @ r1<- AA (length) 8114 .else 8115 mov r1, r10, lsr #4 @ r1<- B (length) 8116 .endif 8117 cmp r3, #'I' @ array of ints? 8118 cmpne r3, #'L' @ array of objects? 8119 cmpne r3, #'[' @ array of arrays? 8120 mov r9, r1 @ save length in r9 8121 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8122 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8123 cmp r0, #0 @ null return? 
8124 beq common_exceptionThrown @ alloc failed, handle exception 8125 8126 FETCH(r1, 2) @ r1<- FEDC or CCCC 8127 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8128 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8129 subs r9, r9, #1 @ length--, check for neg 8130 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8131 bmi 2f @ was zero, bail 8132 8133 @ copy values from registers into the array 8134 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8135 .if 1 8136 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 81371: ldr r3, [r2], #4 @ r3<- *r2++ 8138 subs r9, r9, #1 @ count-- 8139 str r3, [r0], #4 @ *contents++ = vX 8140 bpl 1b 8141 @ continue at 2 8142 .else 8143 cmp r9, #4 @ length was initially 5? 8144 and r2, r10, #15 @ r2<- A 8145 bne 1f @ <= 4 args, branch 8146 GET_VREG(r3, r2) @ r3<- vA 8147 sub r9, r9, #1 @ count-- 8148 str r3, [r0, #16] @ contents[4] = vA 81491: and r2, r1, #15 @ r2<- F/E/D/C 8150 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8151 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8152 subs r9, r9, #1 @ count-- 8153 str r3, [r0], #4 @ *contents++ = vX 8154 bpl 1b 8155 @ continue at 2 8156 .endif 8157 81582: 8159 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8160 GOTO_OPCODE(ip) @ execute it 8161 8162 /* 8163 * Throw an exception indicating that we have not implemented this 8164 * mode of filled-new-array. 
8165 */ 8166.LOP_FILLED_NEW_ARRAY_RANGE_notimpl: 8167 ldr r0, .L_strInternalError 8168 ldr r1, .L_strFilledNewArrayNotImpl 8169 bl dvmThrowException 8170 b common_exceptionThrown 8171 8172 .if (!1) @ define in one or the other, not both 8173.L_strFilledNewArrayNotImpl: 8174 .word .LstrFilledNewArrayNotImpl 8175.L_strInternalError: 8176 .word .LstrInternalError 8177 .endif 8178 8179/* continuation for OP_CMPL_FLOAT */ 8180.LOP_CMPL_FLOAT_finish: 8181 SET_VREG(r0, r9) @ vAA<- r0 8182 GOTO_OPCODE(ip) @ jump to next instruction 8183 8184/* continuation for OP_CMPG_FLOAT */ 8185.LOP_CMPG_FLOAT_finish: 8186 SET_VREG(r0, r9) @ vAA<- r0 8187 GOTO_OPCODE(ip) @ jump to next instruction 8188 8189/* continuation for OP_CMPL_DOUBLE */ 8190.LOP_CMPL_DOUBLE_finish: 8191 SET_VREG(r0, r9) @ vAA<- r0 8192 GOTO_OPCODE(ip) @ jump to next instruction 8193 8194/* continuation for OP_CMPG_DOUBLE */ 8195.LOP_CMPG_DOUBLE_finish: 8196 SET_VREG(r0, r9) @ vAA<- r0 8197 GOTO_OPCODE(ip) @ jump to next instruction 8198 8199/* continuation for OP_CMP_LONG */ 8200 8201.LOP_CMP_LONG_less: 8202 mvn r1, #0 @ r1<- -1 8203 @ Want to cond code the next mov so we can avoid branch, but don't see it; 8204 @ instead, we just replicate the tail end. 
8205 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8206 SET_VREG(r1, r9) @ vAA<- r1 8207 GET_INST_OPCODE(ip) @ extract opcode from rINST 8208 GOTO_OPCODE(ip) @ jump to next instruction 8209 8210.LOP_CMP_LONG_greater: 8211 mov r1, #1 @ r1<- 1 8212 @ fall through to _finish 8213 8214.LOP_CMP_LONG_finish: 8215 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8216 SET_VREG(r1, r9) @ vAA<- r1 8217 GET_INST_OPCODE(ip) @ extract opcode from rINST 8218 GOTO_OPCODE(ip) @ jump to next instruction 8219 8220/* continuation for OP_AGET_WIDE */ 8221 8222.LOP_AGET_WIDE_finish: 8223 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8224 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8225 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8226 GET_INST_OPCODE(ip) @ extract opcode from rINST 8227 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8228 GOTO_OPCODE(ip) @ jump to next instruction 8229 8230/* continuation for OP_APUT_WIDE */ 8231 8232.LOP_APUT_WIDE_finish: 8233 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8234 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8235 GET_INST_OPCODE(ip) @ extract opcode from rINST 8236 strd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8237 GOTO_OPCODE(ip) @ jump to next instruction 8238 8239/* continuation for OP_APUT_OBJECT */ 8240 /* 8241 * On entry: 8242 * r1 = vBB (arrayObj) 8243 * r9 = vAA (obj) 8244 * r10 = offset into array (vBB + vCC * width) 8245 */ 8246.LOP_APUT_OBJECT_finish: 8247 cmp r9, #0 @ storing null reference? 8248 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8249 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8250 ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz 8251 bl dvmCanPutArrayElement @ test object type vs. array type 8252 cmp r0, #0 @ okay? 
8253 beq common_errArrayStore @ no 8254.LOP_APUT_OBJECT_skip_check: 8255 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8256 GET_INST_OPCODE(ip) @ extract opcode from rINST 8257 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8258 GOTO_OPCODE(ip) @ jump to next instruction 8259 8260/* continuation for OP_IGET */ 8261 8262 /* 8263 * Currently: 8264 * r0 holds resolved field 8265 * r9 holds object 8266 */ 8267.LOP_IGET_finish: 8268 @bl common_squeak0 8269 cmp r9, #0 @ check object for null 8270 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8271 beq common_errNullObject @ object was null 8272 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8273 @ no-op @ acquiring load 8274 mov r2, rINST, lsr #8 @ r2<- A+ 8275 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8276 and r2, r2, #15 @ r2<- A 8277 GET_INST_OPCODE(ip) @ extract opcode from rINST 8278 SET_VREG(r0, r2) @ fp[A]<- r0 8279 GOTO_OPCODE(ip) @ jump to next instruction 8280 8281/* continuation for OP_IGET_WIDE */ 8282 8283 /* 8284 * Currently: 8285 * r0 holds resolved field 8286 * r9 holds object 8287 */ 8288.LOP_IGET_WIDE_finish: 8289 cmp r9, #0 @ check object for null 8290 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8291 beq common_errNullObject @ object was null 8292 .if 0 8293 add r0, r9, r3 @ r0<- address of field 8294 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 8295 .else 8296 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 8297 .endif 8298 mov r2, rINST, lsr #8 @ r2<- A+ 8299 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8300 and r2, r2, #15 @ r2<- A 8301 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8302 GET_INST_OPCODE(ip) @ extract opcode from rINST 8303 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8304 GOTO_OPCODE(ip) @ jump to next instruction 8305 8306/* continuation for OP_IGET_OBJECT */ 8307 8308 /* 8309 * Currently: 8310 * r0 holds resolved field 8311 * r9 holds object 8312 */ 8313.LOP_IGET_OBJECT_finish: 8314 @bl common_squeak0 8315 cmp r9, #0 @ 
check object for null 8316 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8317 beq common_errNullObject @ object was null 8318 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8319 @ no-op @ acquiring load 8320 mov r2, rINST, lsr #8 @ r2<- A+ 8321 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8322 and r2, r2, #15 @ r2<- A 8323 GET_INST_OPCODE(ip) @ extract opcode from rINST 8324 SET_VREG(r0, r2) @ fp[A]<- r0 8325 GOTO_OPCODE(ip) @ jump to next instruction 8326 8327/* continuation for OP_IGET_BOOLEAN */ 8328 8329 /* 8330 * Currently: 8331 * r0 holds resolved field 8332 * r9 holds object 8333 */ 8334.LOP_IGET_BOOLEAN_finish: 8335 @bl common_squeak1 8336 cmp r9, #0 @ check object for null 8337 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8338 beq common_errNullObject @ object was null 8339 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8340 @ no-op @ acquiring load 8341 mov r2, rINST, lsr #8 @ r2<- A+ 8342 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8343 and r2, r2, #15 @ r2<- A 8344 GET_INST_OPCODE(ip) @ extract opcode from rINST 8345 SET_VREG(r0, r2) @ fp[A]<- r0 8346 GOTO_OPCODE(ip) @ jump to next instruction 8347 8348/* continuation for OP_IGET_BYTE */ 8349 8350 /* 8351 * Currently: 8352 * r0 holds resolved field 8353 * r9 holds object 8354 */ 8355.LOP_IGET_BYTE_finish: 8356 @bl common_squeak2 8357 cmp r9, #0 @ check object for null 8358 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8359 beq common_errNullObject @ object was null 8360 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8361 @ no-op @ acquiring load 8362 mov r2, rINST, lsr #8 @ r2<- A+ 8363 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8364 and r2, r2, #15 @ r2<- A 8365 GET_INST_OPCODE(ip) @ extract opcode from rINST 8366 SET_VREG(r0, r2) @ fp[A]<- r0 8367 GOTO_OPCODE(ip) @ jump to next instruction 8368 8369/* continuation for OP_IGET_CHAR */ 8370 8371 /* 8372 * Currently: 8373 * r0 holds resolved field 8374 * r9 holds object 
8375 */ 8376.LOP_IGET_CHAR_finish: 8377 @bl common_squeak3 8378 cmp r9, #0 @ check object for null 8379 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8380 beq common_errNullObject @ object was null 8381 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8382 @ no-op @ acquiring load 8383 mov r2, rINST, lsr #8 @ r2<- A+ 8384 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8385 and r2, r2, #15 @ r2<- A 8386 GET_INST_OPCODE(ip) @ extract opcode from rINST 8387 SET_VREG(r0, r2) @ fp[A]<- r0 8388 GOTO_OPCODE(ip) @ jump to next instruction 8389 8390/* continuation for OP_IGET_SHORT */ 8391 8392 /* 8393 * Currently: 8394 * r0 holds resolved field 8395 * r9 holds object 8396 */ 8397.LOP_IGET_SHORT_finish: 8398 @bl common_squeak4 8399 cmp r9, #0 @ check object for null 8400 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8401 beq common_errNullObject @ object was null 8402 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8403 @ no-op @ acquiring load 8404 mov r2, rINST, lsr #8 @ r2<- A+ 8405 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8406 and r2, r2, #15 @ r2<- A 8407 GET_INST_OPCODE(ip) @ extract opcode from rINST 8408 SET_VREG(r0, r2) @ fp[A]<- r0 8409 GOTO_OPCODE(ip) @ jump to next instruction 8410 8411/* continuation for OP_IPUT */ 8412 8413 /* 8414 * Currently: 8415 * r0 holds resolved field 8416 * r9 holds object 8417 */ 8418.LOP_IPUT_finish: 8419 @bl common_squeak0 8420 mov r1, rINST, lsr #8 @ r1<- A+ 8421 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8422 and r1, r1, #15 @ r1<- A 8423 cmp r9, #0 @ check object for null 8424 GET_VREG(r0, r1) @ r0<- fp[A] 8425 beq common_errNullObject @ object was null 8426 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8427 GET_INST_OPCODE(ip) @ extract opcode from rINST 8428 @ no-op @ releasing store 8429 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8430 GOTO_OPCODE(ip) @ jump to next instruction 8431 8432/* continuation for OP_IPUT_WIDE */ 8433 8434 /* 8435 * 
Currently: 8436 * r0 holds resolved field 8437 * r9 holds object 8438 */ 8439.LOP_IPUT_WIDE_finish: 8440 mov r2, rINST, lsr #8 @ r2<- A+ 8441 cmp r9, #0 @ check object for null 8442 and r2, r2, #15 @ r2<- A 8443 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8444 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8445 beq common_errNullObject @ object was null 8446 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8447 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8448 GET_INST_OPCODE(r10) @ extract opcode from rINST 8449 .if 0 8450 add r2, r9, r3 @ r2<- target address 8451 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 8452 .else 8453 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1 8454 .endif 8455 GOTO_OPCODE(r10) @ jump to next instruction 8456 8457/* continuation for OP_IPUT_OBJECT */ 8458 8459 /* 8460 * Currently: 8461 * r0 holds resolved field 8462 * r9 holds object 8463 */ 8464.LOP_IPUT_OBJECT_finish: 8465 @bl common_squeak0 8466 mov r1, rINST, lsr #8 @ r1<- A+ 8467 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8468 and r1, r1, #15 @ r1<- A 8469 cmp r9, #0 @ check object for null 8470 GET_VREG(r0, r1) @ r0<- fp[A] 8471 beq common_errNullObject @ object was null 8472 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8473 GET_INST_OPCODE(ip) @ extract opcode from rINST 8474 @ no-op @ releasing store 8475 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8476 GOTO_OPCODE(ip) @ jump to next instruction 8477 8478/* continuation for OP_IPUT_BOOLEAN */ 8479 8480 /* 8481 * Currently: 8482 * r0 holds resolved field 8483 * r9 holds object 8484 */ 8485.LOP_IPUT_BOOLEAN_finish: 8486 @bl common_squeak1 8487 mov r1, rINST, lsr #8 @ r1<- A+ 8488 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8489 and r1, r1, #15 @ r1<- A 8490 cmp r9, #0 @ check object for null 8491 GET_VREG(r0, r1) @ r0<- fp[A] 8492 beq common_errNullObject @ object was null 8493 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8494 GET_INST_OPCODE(ip) @ extract 
opcode from rINST 8495 @ no-op @ releasing store 8496 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8497 GOTO_OPCODE(ip) @ jump to next instruction 8498 8499/* continuation for OP_IPUT_BYTE */ 8500 8501 /* 8502 * Currently: 8503 * r0 holds resolved field 8504 * r9 holds object 8505 */ 8506.LOP_IPUT_BYTE_finish: 8507 @bl common_squeak2 8508 mov r1, rINST, lsr #8 @ r1<- A+ 8509 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8510 and r1, r1, #15 @ r1<- A 8511 cmp r9, #0 @ check object for null 8512 GET_VREG(r0, r1) @ r0<- fp[A] 8513 beq common_errNullObject @ object was null 8514 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8515 GET_INST_OPCODE(ip) @ extract opcode from rINST 8516 @ no-op @ releasing store 8517 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8518 GOTO_OPCODE(ip) @ jump to next instruction 8519 8520/* continuation for OP_IPUT_CHAR */ 8521 8522 /* 8523 * Currently: 8524 * r0 holds resolved field 8525 * r9 holds object 8526 */ 8527.LOP_IPUT_CHAR_finish: 8528 @bl common_squeak3 8529 mov r1, rINST, lsr #8 @ r1<- A+ 8530 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8531 and r1, r1, #15 @ r1<- A 8532 cmp r9, #0 @ check object for null 8533 GET_VREG(r0, r1) @ r0<- fp[A] 8534 beq common_errNullObject @ object was null 8535 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8536 GET_INST_OPCODE(ip) @ extract opcode from rINST 8537 @ no-op @ releasing store 8538 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8539 GOTO_OPCODE(ip) @ jump to next instruction 8540 8541/* continuation for OP_IPUT_SHORT */ 8542 8543 /* 8544 * Currently: 8545 * r0 holds resolved field 8546 * r9 holds object 8547 */ 8548.LOP_IPUT_SHORT_finish: 8549 @bl common_squeak4 8550 mov r1, rINST, lsr #8 @ r1<- A+ 8551 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8552 and r1, r1, #15 @ r1<- A 8553 cmp r9, #0 @ check object for null 8554 GET_VREG(r0, r1) @ r0<- fp[A] 8555 beq common_errNullObject @ object was null 8556 
FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8557 GET_INST_OPCODE(ip) @ extract opcode from rINST 8558 @ no-op @ releasing store 8559 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8560 GOTO_OPCODE(ip) @ jump to next instruction 8561 8562/* continuation for OP_SGET */ 8563 8564 /* 8565 * Continuation if the field has not yet been resolved. 8566 * r1: BBBB field ref 8567 */ 8568.LOP_SGET_resolve: 8569 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8570 EXPORT_PC() @ resolve() could throw, so export now 8571 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8572 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8573 cmp r0, #0 @ success? 8574 bne .LOP_SGET_finish @ yes, finish 8575 b common_exceptionThrown @ no, handle exception 8576 8577/* continuation for OP_SGET_WIDE */ 8578 8579 /* 8580 * Continuation if the field has not yet been resolved. 8581 * r1: BBBB field ref 8582 * 8583 * Returns StaticField pointer in r0. 8584 */ 8585.LOP_SGET_WIDE_resolve: 8586 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8587 EXPORT_PC() @ resolve() could throw, so export now 8588 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8589 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8590 cmp r0, #0 @ success? 8591 bne .LOP_SGET_WIDE_finish @ yes, finish 8592 b common_exceptionThrown @ no, handle exception 8593 8594/* continuation for OP_SGET_OBJECT */ 8595 8596 /* 8597 * Continuation if the field has not yet been resolved. 8598 * r1: BBBB field ref 8599 */ 8600.LOP_SGET_OBJECT_resolve: 8601 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8602 EXPORT_PC() @ resolve() could throw, so export now 8603 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8604 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8605 cmp r0, #0 @ success? 
8606 bne .LOP_SGET_OBJECT_finish @ yes, finish 8607 b common_exceptionThrown @ no, handle exception 8608 8609/* continuation for OP_SGET_BOOLEAN */ 8610 8611 /* 8612 * Continuation if the field has not yet been resolved. 8613 * r1: BBBB field ref 8614 */ 8615.LOP_SGET_BOOLEAN_resolve: 8616 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8617 EXPORT_PC() @ resolve() could throw, so export now 8618 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8619 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8620 cmp r0, #0 @ success? 8621 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 8622 b common_exceptionThrown @ no, handle exception 8623 8624/* continuation for OP_SGET_BYTE */ 8625 8626 /* 8627 * Continuation if the field has not yet been resolved. 8628 * r1: BBBB field ref 8629 */ 8630.LOP_SGET_BYTE_resolve: 8631 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8632 EXPORT_PC() @ resolve() could throw, so export now 8633 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8634 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8635 cmp r0, #0 @ success? 8636 bne .LOP_SGET_BYTE_finish @ yes, finish 8637 b common_exceptionThrown @ no, handle exception 8638 8639/* continuation for OP_SGET_CHAR */ 8640 8641 /* 8642 * Continuation if the field has not yet been resolved. 8643 * r1: BBBB field ref 8644 */ 8645.LOP_SGET_CHAR_resolve: 8646 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8647 EXPORT_PC() @ resolve() could throw, so export now 8648 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8649 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8650 cmp r0, #0 @ success? 8651 bne .LOP_SGET_CHAR_finish @ yes, finish 8652 b common_exceptionThrown @ no, handle exception 8653 8654/* continuation for OP_SGET_SHORT */ 8655 8656 /* 8657 * Continuation if the field has not yet been resolved. 
8658 * r1: BBBB field ref 8659 */ 8660.LOP_SGET_SHORT_resolve: 8661 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8662 EXPORT_PC() @ resolve() could throw, so export now 8663 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8664 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8665 cmp r0, #0 @ success? 8666 bne .LOP_SGET_SHORT_finish @ yes, finish 8667 b common_exceptionThrown @ no, handle exception 8668 8669/* continuation for OP_SPUT */ 8670 8671 /* 8672 * Continuation if the field has not yet been resolved. 8673 * r1: BBBB field ref 8674 */ 8675.LOP_SPUT_resolve: 8676 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8677 EXPORT_PC() @ resolve() could throw, so export now 8678 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8679 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8680 cmp r0, #0 @ success? 8681 bne .LOP_SPUT_finish @ yes, finish 8682 b common_exceptionThrown @ no, handle exception 8683 8684/* continuation for OP_SPUT_WIDE */ 8685 8686 /* 8687 * Continuation if the field has not yet been resolved. 8688 * r1: BBBB field ref 8689 * r9: &fp[AA] 8690 * 8691 * Returns StaticField pointer in r2. 8692 */ 8693.LOP_SPUT_WIDE_resolve: 8694 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8695 EXPORT_PC() @ resolve() could throw, so export now 8696 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8697 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8698 cmp r0, #0 @ success? 8699 mov r2, r0 @ copy to r2 8700 bne .LOP_SPUT_WIDE_finish @ yes, finish 8701 b common_exceptionThrown @ no, handle exception 8702 8703/* continuation for OP_SPUT_OBJECT */ 8704 8705 /* 8706 * Continuation if the field has not yet been resolved. 
8707 * r1: BBBB field ref 8708 */ 8709.LOP_SPUT_OBJECT_resolve: 8710 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8711 EXPORT_PC() @ resolve() could throw, so export now 8712 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8713 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8714 cmp r0, #0 @ success? 8715 bne .LOP_SPUT_OBJECT_finish @ yes, finish 8716 b common_exceptionThrown @ no, handle exception 8717 8718/* continuation for OP_SPUT_BOOLEAN */ 8719 8720 /* 8721 * Continuation if the field has not yet been resolved. 8722 * r1: BBBB field ref 8723 */ 8724.LOP_SPUT_BOOLEAN_resolve: 8725 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8726 EXPORT_PC() @ resolve() could throw, so export now 8727 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8728 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8729 cmp r0, #0 @ success? 8730 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 8731 b common_exceptionThrown @ no, handle exception 8732 8733/* continuation for OP_SPUT_BYTE */ 8734 8735 /* 8736 * Continuation if the field has not yet been resolved. 8737 * r1: BBBB field ref 8738 */ 8739.LOP_SPUT_BYTE_resolve: 8740 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8741 EXPORT_PC() @ resolve() could throw, so export now 8742 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8743 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8744 cmp r0, #0 @ success? 8745 bne .LOP_SPUT_BYTE_finish @ yes, finish 8746 b common_exceptionThrown @ no, handle exception 8747 8748/* continuation for OP_SPUT_CHAR */ 8749 8750 /* 8751 * Continuation if the field has not yet been resolved. 8752 * r1: BBBB field ref 8753 */ 8754.LOP_SPUT_CHAR_resolve: 8755 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8756 EXPORT_PC() @ resolve() could throw, so export now 8757 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8758 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8759 cmp r0, #0 @ success? 
8760 bne .LOP_SPUT_CHAR_finish @ yes, finish 8761 b common_exceptionThrown @ no, handle exception 8762 8763/* continuation for OP_SPUT_SHORT */ 8764 8765 /* 8766 * Continuation if the field has not yet been resolved. 8767 * r1: BBBB field ref 8768 */ 8769.LOP_SPUT_SHORT_resolve: 8770 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8771 EXPORT_PC() @ resolve() could throw, so export now 8772 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8773 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8774 cmp r0, #0 @ success? 8775 bne .LOP_SPUT_SHORT_finish @ yes, finish 8776 b common_exceptionThrown @ no, handle exception 8777 8778/* continuation for OP_INVOKE_VIRTUAL */ 8779 8780 /* 8781 * At this point: 8782 * r0 = resolved base method 8783 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 8784 */ 8785.LOP_INVOKE_VIRTUAL_continue: 8786 GET_VREG(r1, r10) @ r1<- "this" ptr 8787 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8788 cmp r1, #0 @ is "this" null? 
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
    ldr     r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodNoRange  @ continue on

/* continuation for OP_INVOKE_SUPER */

    /*
     * Look the resolved method up in the superclass vtable and invoke it.
     * At this point:
     * r0 = resolved base method
     * r9 = method->clazz
     */
.LOP_INVOKE_SUPER_continue:
    ldr     r1, [r9, #offClassObject_super] @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_nsm       @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodNoRange  @ continue on

    @ method not yet resolved: resolve as a virtual method, then re-enter
    @ the fast path above (or throw if resolution failed).
.LOP_INVOKE_SUPER_resolve:
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_continue  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
8823 * r0 = resolved base method 8824 */ 8825.LOP_INVOKE_SUPER_nsm: 8826 ldr r1, [r0, #offMethod_name] @ r1<- method name 8827 b common_errNoSuchMethod 8828 8829/* continuation for OP_INVOKE_DIRECT */ 8830 8831 /* 8832 * On entry: 8833 * r1 = reference (BBBB or CCCC) 8834 * r10 = "this" register 8835 */ 8836.LOP_INVOKE_DIRECT_resolve: 8837 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8838 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8839 mov r2, #METHOD_DIRECT @ resolver method type 8840 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8841 cmp r0, #0 @ got null? 8842 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 8843 bne .LOP_INVOKE_DIRECT_finish @ no, continue 8844 b common_exceptionThrown @ yes, handle exception 8845 8846/* continuation for OP_INVOKE_VIRTUAL_RANGE */ 8847 8848 /* 8849 * At this point: 8850 * r0 = resolved base method 8851 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 8852 */ 8853.LOP_INVOKE_VIRTUAL_RANGE_continue: 8854 GET_VREG(r1, r10) @ r1<- "this" ptr 8855 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8856 cmp r1, #0 @ is "this" null? 
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
    ldr     r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange    @ continue on

/* continuation for OP_INVOKE_SUPER_RANGE */

    /*
     * Look the resolved method up in the superclass vtable and invoke it
     * (range-argument form).
     * At this point:
     * r0 = resolved base method
     * r9 = method->clazz
     */
.LOP_INVOKE_SUPER_RANGE_continue:
    ldr     r1, [r9, #offClassObject_super] @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange    @ continue on

    @ method not yet resolved: resolve as a virtual method, then re-enter
    @ the fast path above (or throw if resolution failed).
.LOP_INVOKE_SUPER_RANGE_resolve:
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_RANGE_continue @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     * r0 = resolved base method
     */
.LOP_INVOKE_SUPER_RANGE_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod

/* continuation for OP_INVOKE_DIRECT_RANGE */

    /*
     * Direct method not yet resolved: resolve it, reload "this", and
     * re-enter the handler (or throw if resolution failed).
     * On entry:
     * r1 = reference (BBBB or CCCC)
     * r10 = "this" register
     */
.LOP_INVOKE_DIRECT_RANGE_resolve:
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* continuation for OP_FLOAT_TO_LONG */
/*
 * Convert the float in r0 to a long in r0/r1 (r0 = low word, r1 = high).
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
f2l_doconv:
    stmfd   sp!, {r4, lr}
    mov     r1, #0x5f000000             @ (float)maxlong, i.e. 2^63 as a float
    mov     r4, r0                      @ preserve arg across the libcall
    bl      __aeabi_fcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
    mvnne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, #0xdf000000             @ (float)minlong, i.e. -2^63 as a float
    bl      __aeabi_fcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (8000000000000000)
    movne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}

    @ NaN check: NaN is the only value that compares unequal to itself
    mov     r0, r4                      @ recover arg
    mov     r1, r4
    bl      __aeabi_fcmpeq              @ is arg == self?
8943 cmp r0, #0 @ zero == no 8944 moveq r1, #0 @ return zero for NaN 8945 ldmeqfd sp!, {r4, pc} 8946 8947 mov r0, r4 @ recover arg 8948 bl __aeabi_f2lz @ convert float to long 8949 ldmfd sp!, {r4, pc} 8950 8951/* continuation for OP_DOUBLE_TO_LONG */ 8952/* 8953 * Convert the double in r0/r1 to a long in r0/r1. 8954 * 8955 * We have to clip values to long min/max per the specification. The 8956 * expected common case is a "reasonable" value that converts directly 8957 * to modest integer. The EABI convert function isn't doing this for us. 8958 */ 8959d2l_doconv: 8960 stmfd sp!, {r4, r5, lr} @ save regs 8961 mov r3, #0x43000000 @ maxlong, as a double (high word) 8962 add r3, #0x00e00000 @ 0x43e00000 8963 mov r2, #0 @ maxlong, as a double (low word) 8964 sub sp, sp, #4 @ align for EABI 8965 mov r4, r0 @ save a copy of r0 8966 mov r5, r1 @ and r1 8967 bl __aeabi_dcmpge @ is arg >= maxlong? 8968 cmp r0, #0 @ nonzero == yes 8969 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 8970 mvnne r1, #0x80000000 8971 bne 1f 8972 8973 mov r0, r4 @ recover arg 8974 mov r1, r5 8975 mov r3, #0xc3000000 @ minlong, as a double (high word) 8976 add r3, #0x00e00000 @ 0xc3e00000 8977 mov r2, #0 @ minlong, as a double (low word) 8978 bl __aeabi_dcmple @ is arg <= minlong? 8979 cmp r0, #0 @ nonzero == yes 8980 movne r0, #0 @ return minlong (8000000000000000) 8981 movne r1, #0x80000000 8982 bne 1f 8983 8984 mov r0, r4 @ recover arg 8985 mov r1, r5 8986 mov r2, r4 @ compare against self 8987 mov r3, r5 8988 bl __aeabi_dcmpeq @ is arg == self? 
8989 cmp r0, #0 @ zero == no 8990 moveq r1, #0 @ return zero for NaN 8991 beq 1f 8992 8993 mov r0, r4 @ recover arg 8994 mov r1, r5 8995 bl __aeabi_d2lz @ convert double to long 8996 89971: 8998 add sp, sp, #4 8999 ldmfd sp!, {r4, r5, pc} 9000 9001/* continuation for OP_MUL_LONG */ 9002 9003.LOP_MUL_LONG_finish: 9004 GET_INST_OPCODE(ip) @ extract opcode from rINST 9005 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 9006 GOTO_OPCODE(ip) @ jump to next instruction 9007 9008/* continuation for OP_SHL_LONG */ 9009 9010.LOP_SHL_LONG_finish: 9011 mov r0, r0, asl r2 @ r0<- r0 << r2 9012 GET_INST_OPCODE(ip) @ extract opcode from rINST 9013 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9014 GOTO_OPCODE(ip) @ jump to next instruction 9015 9016/* continuation for OP_SHR_LONG */ 9017 9018.LOP_SHR_LONG_finish: 9019 mov r1, r1, asr r2 @ r1<- r1 >> r2 9020 GET_INST_OPCODE(ip) @ extract opcode from rINST 9021 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9022 GOTO_OPCODE(ip) @ jump to next instruction 9023 9024/* continuation for OP_USHR_LONG */ 9025 9026.LOP_USHR_LONG_finish: 9027 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 9028 GET_INST_OPCODE(ip) @ extract opcode from rINST 9029 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9030 GOTO_OPCODE(ip) @ jump to next instruction 9031 9032/* continuation for OP_SHL_LONG_2ADDR */ 9033 9034.LOP_SHL_LONG_2ADDR_finish: 9035 GET_INST_OPCODE(ip) @ extract opcode from rINST 9036 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9037 GOTO_OPCODE(ip) @ jump to next instruction 9038 9039/* continuation for OP_SHR_LONG_2ADDR */ 9040 9041.LOP_SHR_LONG_2ADDR_finish: 9042 GET_INST_OPCODE(ip) @ extract opcode from rINST 9043 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9044 GOTO_OPCODE(ip) @ jump to next instruction 9045 9046/* continuation for OP_USHR_LONG_2ADDR */ 9047 9048.LOP_USHR_LONG_2ADDR_finish: 9049 GET_INST_OPCODE(ip) @ extract opcode from rINST 9050 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9051 GOTO_OPCODE(ip) @ jump to next instruction 9052 9053/* continuation for OP_IGET_VOLATILE */ 9054 9055 /* 
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_VOLATILE_finish:
    @bl      common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    SMP_DMB                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_VOLATILE_finish:
    @bl      common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SGET_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_VOLATILE_finish   @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_VOLATILE_finish   @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_IGET_OBJECT_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_OBJECT_VOLATILE_finish:
    @bl      common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    SMP_DMB                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_WIDE_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_WIDE_VOLATILE_finish:
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    .if     1                           @ always take the atomic 64-bit path here
    add     r0, r9, r3                  @ r0<- address of field
    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
    .else
    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
    .endif
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_WIDE_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_WIDE_VOLATILE_finish:
    mov     r2, rINST, lsr #8           @ r2<- A+
    cmp     r9, #0                      @ check object for null
    and     r2, r2, #15                 @ r2<- A
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[A]
    @ Opcode is kept in r10 (callee-saved per EABI, see file header) rather
    @ than ip, because the helper call below may clobber ip (scratch).
    GET_INST_OPCODE(r10)                @ extract opcode from rINST
    .if     1                           @ always take the atomic 64-bit path here
    add     r2, r9, r3                  @ r2<- target address
    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
    .else
    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
    .endif
    GOTO_OPCODE(r10)                    @ jump to next instruction

/* continuation for OP_SGET_WIDE_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     *
     * Returns StaticField pointer in r0.
     */
.LOP_SGET_WIDE_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_WIDE_VOLATILE_finish  @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_WIDE_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     *  r9: &fp[AA]
     *
     * Returns StaticField pointer in r2.
     */
.LOP_SPUT_WIDE_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    mov     r2, r0                      @ copy to r2 (mov preserves flags for bne)
    bne     .LOP_SPUT_WIDE_VOLATILE_finish  @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_EXECUTE_INLINE */

    /*
     * Extract args, call function.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     *
     * Other ideas:
     * - Use a jump table from the main piece to jump directly into the
     *   AND/LDR pairs.  Costs a data load, saves a branch.
     * - Have five separate pieces that do the loading, so we can work the
     *   interleave a little better.  Increases code size.
     */
.LOP_EXECUTE_INLINE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- FEDC
    @ In ARM state pc reads as (this insn + 8), so r0==0 (4 args) lands on
    @ label "4:" just past the bl; each case is 2 instrs == 8 bytes.
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  and     ip, r9, #0xf000             @ isolate F
    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vF (shift right 12, left 2)
3:  and     ip, r9, #0x0f00             @ isolate E
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vE
2:  and     ip, r9, #0x00f0             @ isolate D
    ldr     r1, [rFP, ip, lsr #2]       @ r1<- vD
1:  and     ip, r9, #0x000f             @ isolate C
    ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
0:
    ldr     r9, .LOP_EXECUTE_INLINE_table       @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_table:
    .word   gDvmInlineOpsTable

/* continuation for OP_EXECUTE_INLINE_RANGE */

    /*
     * Extract args, call function.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     */
.LOP_EXECUTE_INLINE_RANGE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- CCCC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  add     ip, r9, #3                  @ base+3
    GET_VREG(r3, ip)                    @ r3<- vBase[3]
3:  add     ip, r9, #2                  @ base+2
    GET_VREG(r2, ip)                    @ r2<- vBase[2]
2:  add     ip, r9, #1                  @ base+1
    GET_VREG(r1, ip)                    @ r1<- vBase[1]
1:  add     ip, r9, #0                  @ (nop)
    GET_VREG(r0, ip)                    @ r0<- vBase[0]
0:
    ldr     r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_RANGE_table:
    .word   gDvmInlineOpsTable

/* continuation for OP_IPUT_OBJECT_VOLATILE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_OBJECT_VOLATILE_finish:
    @bl      common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SGET_OBJECT_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_OBJECT_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_OBJECT_VOLATILE_finish    @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_OBJECT_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_OBJECT_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_OBJECT_VOLATILE_finish    @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
    .global dvmAsmSisterEnd
dvmAsmSisterEnd:

/* File: armv5te/footer.S */

/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */



    .text
    .align  2

#if defined(WITH_JIT)
#if defined(WITH_SELF_VERIFICATION)
    /*
     * Self-verification variants of the JIT-to-interpreter trampolines.
     * Each one selects a shadow-run exit reason in r2 and routes through
     * jitSVShadowRunEnd instead of re-entering the interpreter directly.
     */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    @ NOTE(review): unlike its siblings this entry does not clear
    @ thread->inJitCodeCache — verify whether that is intentional.
    str    lr,[rGLUE,#offGlue_jitResumeNPC]
    str    r1,[rGLUE,#offGlue_jitResumeDPC]
    mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSTraceSelectNoChain   @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpBackwardBranch
dvmJitToInterpBackwardBranch:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSNormal               @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
#else
/*
 * Return from the translation
 * cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction. We have to skip
 * the code cache lookup otherwise it is possible to indefinitely bouce
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    rPC, r0                      @ r0 carried the target dalvik PC
#if defined(WITH_JIT_TUNING)
    mov    r0,lr
    bl     dvmBumpPunt;                 @ (trailing ';' is a harmless stmt separator)
#endif
    EXPORT_PC()
    mov    r0, #0
    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    adrl   rIBASE, dvmAsmInstructionStart   @ restore handler base
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *    r0 <= PC
 *    r1 <= PC of resume instruction
 *    lr <= resume point in translation
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResumeNPC] @ stash translation resume point
    str    r1,[rGLUE,#offGlue_jitResumeDPC] @ stash dalvik resume point
    mov    r1,#kInterpEntryInstr
    @ enum is 4 byte in aapcs-EABI
    str    r1, [rGLUE, #offGlue_entryPoint]
    mov    rPC,r0
    EXPORT_PC()

    adrl   rIBASE, dvmAsmInstructionStart
    mov    r2,#kJitSingleStep           @ Ask for single step and then revert
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r1,#1                        @ set changeInterp to bail to debug interp
    b      common_gotoBail

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used for callees.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @  in case target is HANDLER_INTERPRET
    cmp    r0,#0
    bxne   r0                           @ continue native execution if so
    b      2f                           @ no translation: shared request path

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.
 */
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    rPC,[lr, #-1]                @ get our target PC (lr has Thumb bit set)
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5                 @ save start of chain branch
    add    rINST, #-4                   @  .. which is 9 bytes back
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    2f
    mov    r1,rINST                     @ r1<- chain cell address
    bl     dvmJitChain                  @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                        @ successful chain?
    bxne   r0                           @ continue native execution
    b      toInterpreter                @ didn't chain - resume with interpreter

/* No translation, so request one if profiling isn't disabled*/
2:
    adrl   rIBASE, dvmAsmInstructionStart
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp    r0, #0                       @ profiling enabled?
    movne  r2,#kJitTSelectRequestHot    @ ask for trace selection
    bne    common_selectTrace
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return from the translation cache to the interpreter.
 * The return was done with a BLX from thumb mode, and
 * the following 32-bit word contains the target rPC value.
 * Note that lr (r14) will have its low-order bit set to denote
 * its thumb-mode origin.
 *
 * We'll need to stash our lr origin away, recover the new
 * target and then check to see if there is a translation available
 * for our new target.
 * If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    rPC,[lr, #-1]                @ get our target PC (lr has Thumb bit set)
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5                 @ save start of chain branch
    add    rINST,#-4                    @ .. which is 9 bytes back
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNormal
#endif
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    toInterpreter                @ go if not, otherwise do chain
    mov    r1,rINST                     @ r1<- chain cell address
    bl     dvmJitChain                  @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @  in case target is HANDLER_INTERPRET
    cmp    r0,#0                        @ successful chain?
    bxne   r0                           @ continue native execution
    b      toInterpreter                @ didn't chain - resume with interpreter

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @  in case target is HANDLER_INTERPRET
    cmp    r0,#0
    bxne   r0                           @ continue native execution if so
#endif

/*
 * No translation, restore interpreter regs and start interpreting.
 * rGLUE & rFP were preserved in the translated code, and rPC has
 * already been restored by the time we get here.  We'll need to set
 * up rIBASE & rINST, and load the address of the JitTable into r0.
 */
toInterpreter:
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_JIT_PROF_TABLE(r0)
    @ NOTE: intended fallthrough
/*
 * Common code to update potential trace start counter, and initiate
 * a trace-build if appropriate.  On entry, rPC should point to the
 * next instruction to execute, and rINST should be already loaded with
 * the next opcode word, and r0 holds a pointer to the jit profile
 * table (pJitProfTable).
 */
common_testUpdateProfile:
    cmp     r0,#0                       @ null table == profiling disabled
    GET_INST_OPCODE(ip)
    GOTO_OPCODE_IFEQ(ip)                @ if not profiling, fallthrough otherwise */

common_updateProfile:
    eor     r3,rPC,rPC,lsr #12          @ cheap, but fast hash function
    lsl     r3,r3,#(32 - JIT_PROF_SIZE_LOG_2)   @ shift out excess bits
    ldrb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)]  @ get counter
    GET_INST_OPCODE(ip)
    subs    r1,r1,#1                    @ decrement counter
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)]  @ and store it
    GOTO_OPCODE_IFNE(ip)                @ if not threshold, fallthrough otherwise */

/*
 * Here, we switch to the debug interpreter to request
 * trace selection.  First, though, check to see if there
 * is already a native translation in place (and, if so,
 * jump to it now).
 */
    GET_JIT_THRESHOLD(r1)
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)]  @ reset counter
    EXPORT_PC()
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
    str     r0, [r10, #offThread_inJitCodeCache]    @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @  in case target is HANDLER_INTERPRET
    cmp     r0,#0
#if !defined(WITH_SELF_VERIFICATION)
    bxne    r0                          @ jump to the translation
    mov     r2,#kJitTSelectRequest      @ ask for trace selection
    @ fall-through to common_selectTrace
#else
    moveq   r2,#kJitTSelectRequest      @ ask for trace selection
    beq     common_selectTrace
    /*
     * At this point, we have a target translation.  However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    mov     r10, r0                     @ save target
    bl      dvmCompilerGetInterpretTemplate
    cmp     r0, r10                     @ special case?
    bne     jitSVShadowRunStart         @ set up self verification shadow space
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
    /* no return */
#endif

/*
 * On entry:
 *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
 */
common_selectTrace:
    str     r2,[rGLUE,#offGlue_jitState]
    mov     r2,#kInterpEntryInstr       @ normal entry reason
    str     r2,[rGLUE,#offGlue_entryPoint]
    mov     r1,#1                       @ set changeInterp
    b       common_gotoBail

#if defined(WITH_SELF_VERIFICATION)
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
 * On entry:
 *    rPC, rFP, rGLUE: the values that they should contain
 *    r10: the address of the target translation.
 */
jitSVShadowRunStart:
    mov    r0,rPC                       @ r0<- program counter
    mov    r1,rFP                       @ r1<- frame pointer
    mov    r2,rGLUE                     @ r2<- InterpState pointer
    mov    r3,r10                       @ r3<- target translation
    bl     dvmSelfVerificationSaveState @ save registers to shadow space
    ldr    rFP,[r0,#offShadowSpace_shadowFP]    @ rFP<- fp in shadow space
    add    rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
    bx     r10                          @ jump to the translation

/*
 * Restore PC, registers, and interpState to original values
 * before jumping back to the interpreter.
 */
jitSVShadowRunEnd:
    mov    r1,rFP                       @ pass ending fp
    bl     dvmSelfVerificationRestoreState  @ restore pc and fp values
    ldr    rPC,[r0,#offShadowSpace_startPC] @ restore PC
    ldr    rFP,[r0,#offShadowSpace_fp]  @ restore FP
    ldr    rGLUE,[r0,#offShadowSpace_glue]  @ restore InterpState
    ldr    r1,[r0,#offShadowSpace_svState]  @ get self verification state
    cmp    r1,#0                        @ check for punt condition
    beq    1f
    @ Verification mismatch (or non-punt state): re-run under the debug
    @ interpreter with self-verification requested.
    mov    r2,#kJitSelfVerification     @ ask for self verification
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r2,#kInterpEntryInstr        @ normal entry reason
    str    r2,[rGLUE,#offGlue_entryPoint]
    mov    r1,#1                        @ set changeInterp
    b      common_gotoBail

1:                                      @ exit to interpreter without check
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#endif

#endif

/*
 * Common code when a backward branch is taken.
 *
 * TODO: we could avoid a branch by just setting r0 and falling through
 * into the common_periodicChecks code, and having a test on r0 at the
 * end determine if we should return to the caller or update & branch to
 * the next instr.
 *
 * On entry:
 *  r9 is PC adjustment *in bytes*
 */
common_backwardBranch:
    mov     r0, #kInterpEntryInstr
    bl      common_periodicChecks       @ suspend/debugger/profiler check
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile        @ yes, count this branch target
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/*
 * Need to see if the thread needs to be suspended or debugger/profiler
 * activity has begun.  If so, we suspend the thread or side-exit to
 * the debug interpreter as appropriate.
 *
 * The common case is no activity on any of these, so we want to figure
 * that out quickly.  If something is up, we can then sort out what.
 *
 * We want to be fast if the VM was built without debugger or profiler
 * support, but we also need to recognize that the system is usually
 * shipped with both of these enabled.
 *
 * TODO: reduce this so we're just checking a single location.
 *
 * On entry:
 *  r0 is reentry type, e.g. kInterpEntryInstr (for debugger/profiling)
 *  r9 is trampoline PC adjustment *in bytes*
 */
common_periodicChecks:
    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount

#if defined(WITH_DEBUGGER)
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
#endif
#if defined(WITH_PROFILER)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
#endif

    ldr     ip, [r3]                    @ ip<- suspendCount (int)

    @ Fast path: OR all the "something interesting" flags into ip so the Z
    @ flag is set only when every one of them is zero.
#if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
    orrne   ip, ip, r1                  @ ip<- suspendCount | debuggerActive
    orrs    ip, ip, r2                  @ ip<- suspend|debugger|profiler; set Z
#elif defined(WITH_DEBUGGER)
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
    orrsne  ip, ip, r1                  @ yes, ip<- suspend | debugger; set Z
    @ (if not enabled, Z was set by test for r1==0, which is what we want)
#elif defined (WITH_PROFILER)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
    orrs    ip, ip, r2                  @ ip<- suspendCount | activeProfilers
#else
    cmp     ip, #0                      @ not ORing anything in; set Z
#endif

    bxeq    lr                          @ all zero, return

    /*
     * One or more interesting events have happened.  Figure out what.
     *
     * If debugging or profiling are compiled in, we need to disambiguate.
     *
     * r0 still holds the reentry type.
     */
#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
    ldr     ip, [r3]                    @ ip<- suspendCount (int)
    cmp     ip, #0                      @ want suspend?
    beq     1f                          @ no, must be debugger/profiler
#endif

    stmfd   sp!, {r0, lr}               @ preserve r0 and lr
#if defined(WITH_JIT)
    /*
     * Refresh the Jit's cached copy of profile table pointer.  This pointer
     * doubles as the Jit's on/off switch.
     */
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable]    @ r3<-&gDvmJit.pJitProfTable
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r3, [r3]                    @ r3 <- pJitProfTable
    EXPORT_PC()                         @ need for precise GC
    str     r3, [rGLUE, #offGlue_pJitProfTable]     @ refresh Jit's on/off switch
#else
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ need for precise GC
#endif
    bl      dvmCheckSuspendPending      @ do full check, suspend if necessary
    ldmfd   sp!, {r0, lr}               @ restore r0 and lr

#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)

    /*
     * Reload the debugger/profiler enable flags.  We're checking to see
     * if either of these got set while we were suspended.
     *
     * We can't really avoid the #ifdefs here, because the fields don't
     * exist when the feature is disabled.
     */
#if defined(WITH_DEBUGGER)
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
#else
    mov     r1, #0
#endif
#if defined(WITH_PROFILER)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
#else
    mov     r2, #0
#endif

    orrs    r1, r1, r2
    beq     2f

1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
    add     rPC, rPC, r9                @ update rPC
    mov     r1, #1                      @ "want switch" = true
    b       common_gotoBail             @ side exit

#endif  /*WITH_DEBUGGER || WITH_PROFILER*/

2:
    bx      lr                          @ nothing to do, return


/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 *
 * State registers will be saved to the "glue" area before bailing.
 *
 * On entry:
 *  r1 is "bool changeInterp", indicating if we want to switch to the
 *  other interpreter or just bail all the way out
 */
common_gotoBail:
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r0, rGLUE                   @ r0<- glue ptr
    b       dvmMterpStdBail             @ call(glue, changeInterp)

    @add    r1, r1, #1                  @ using (boolean+1)
    @add    r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
    @bl     _longjmp                    @ does not return
    @bl     common_abort


/*
 * Common code for method invocation with range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodRange:
.LinvokeNewRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC (first source register)

    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
    @ (very few methods have > 10 args; could unroll for common cases)
    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
1:  ldr     r1, [r3], #4                @ val = *fp++
    subs    r2, r2, #1                  @ count--
    str     r1, [r10], #4               @ *outs++ = val
    bne     1b                          @ ...while count != 0
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    b       .LinvokeArgsDone

/*
 * Common code for method invocation without range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    beq     .LinvokeArgsDone

    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
.LinvokeNonRange:
    @ Computed goto into the copy cases below: pc reads as (this insn + 8),
    @ so r2==0 (5 args) lands on label "5:"; each case is 4 instrs == 16
    @ bytes, padded with a nop to keep the stride uniform.
    rsb     r2, r2, #5                  @ r2<- 5-r2
    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
5:  and     ip, rINST, #0x0f00          @ isolate A
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vA (shift right 8, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vA
4:  and     ip, r1, #0xf000             @ isolate G
    ldr     r2, [rFP, ip, lsr #10]      @ r2<- vG (shift right 12, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vG
3:  and     ip, r1, #0x0f00             @ isolate F
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vF
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vF
2:  and     ip, r1, #0x00f0             @ isolate E
    ldr     r2, [rFP, ip, lsr #2]       @ r2<- vE
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vE
1:  and     ip, r1, #0x000f             @ isolate D
    ldr     r2, [rFP, ip, lsl #2]       @ r2<- vD
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vD
@ *--outs = vD 99440: @ fall through to .LinvokeArgsDone 9945 9946.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize 9947 ldr r2, [r0, #offMethod_insns] @ r2<- method->insns 9948 ldr rINST, [r0, #offMethod_clazz] @ rINST<- method->clazz 9949 @ find space for the new stack frame, check for overflow 9950 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area 9951 sub r1, r1, r9, lsl #2 @ r1<- newFp (old savearea - regsSize) 9952 SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea 9953@ bl common_dumpRegs 9954 ldr r9, [rGLUE, #offGlue_interpStackEnd] @ r9<- interpStackEnd 9955 sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize) 9956 cmp r3, r9 @ bottom < interpStackEnd? 9957 ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags 9958 blo .LstackOverflow @ yes, this frame will overflow stack 9959 9960 @ set up newSaveArea 9961#ifdef EASY_GDB 9962 SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area 9963 str ip, [r10, #offStackSaveArea_prevSave] 9964#endif 9965 str rFP, [r10, #offStackSaveArea_prevFrame] 9966 str rPC, [r10, #offStackSaveArea_savedPc] 9967#if defined(WITH_JIT) 9968 mov r9, #0 9969 str r9, [r10, #offStackSaveArea_returnAddr] 9970#endif 9971 str r0, [r10, #offStackSaveArea_method] 9972 tst r3, #ACC_NATIVE 9973 bne .LinvokeNative 9974 9975 /* 9976 stmfd sp!, {r0-r3} 9977 bl common_printNewline 9978 mov r0, rFP 9979 mov r1, #0 9980 bl dvmDumpFp 9981 ldmfd sp!, {r0-r3} 9982 stmfd sp!, {r0-r3} 9983 mov r0, r1 9984 mov r1, r10 9985 bl dvmDumpFp 9986 bl common_printNewline 9987 ldmfd sp!, {r0-r3} 9988 */ 9989 9990 ldrh r9, [r2] @ r9 <- load INST from new PC 9991 ldr r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex 9992 mov rPC, r2 @ publish new rPC 9993 ldr r2, [rGLUE, #offGlue_self] @ r2<- glue->self 9994 9995 @ Update "glue" values for the new method 9996 @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST 9997 str r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall 9998 str r3, [rGLUE, 
#offGlue_methodClassDex] @ glue->methodClassDex = ... 9999#if defined(WITH_JIT) 10000 GET_JIT_PROF_TABLE(r0) 10001 mov rFP, r1 @ fp = newFp 10002 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 10003 mov rINST, r9 @ publish new rINST 10004 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 10005 cmp r0,#0 10006 bne common_updateProfile 10007 GOTO_OPCODE(ip) @ jump to next instruction 10008#else 10009 mov rFP, r1 @ fp = newFp 10010 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 10011 mov rINST, r9 @ publish new rINST 10012 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 10013 GOTO_OPCODE(ip) @ jump to next instruction 10014#endif 10015 10016.LinvokeNative: 10017 @ Prep for the native call 10018 @ r0=methodToCall, r1=newFp, r10=newSaveArea 10019 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10020 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->... 10021 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp 10022 str r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top 10023 mov r9, r3 @ r9<- glue->self (preserve) 10024 10025 mov r2, r0 @ r2<- methodToCall 10026 mov r0, r1 @ r0<- newFp (points to args) 10027 add r1, rGLUE, #offGlue_retval @ r1<- &retval 10028 10029#ifdef ASSIST_DEBUGGER 10030 /* insert fake function header to help gdb find the stack frame */ 10031 b .Lskip 10032 .type dalvik_mterp, %function 10033dalvik_mterp: 10034 .fnstart 10035 MTERP_ENTRY1 10036 MTERP_ENTRY2 10037.Lskip: 10038#endif 10039 10040 @mov lr, pc @ set return addr 10041 @ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc 10042 LDR_PC_LR "[r2, #offMethod_nativeFunc]" 10043 10044#if defined(WITH_JIT) 10045 ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status 10046#endif 10047 10048 @ native return; r9=self, r10=newSaveArea 10049 @ equivalent to dvmPopJniLocals 10050 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top 10051 ldr r1, [r9, 
#offThread_exception] @ check for exception 10052#if defined(WITH_JIT) 10053 ldr r3, [r3] @ r3 <- gDvmJit.pProfTable 10054#endif 10055 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp 10056 cmp r1, #0 @ null? 10057 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top 10058#if defined(WITH_JIT) 10059 str r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch 10060#endif 10061 bne common_exceptionThrown @ no, handle exception 10062 10063 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 10064 GET_INST_OPCODE(ip) @ extract opcode from rINST 10065 GOTO_OPCODE(ip) @ jump to next instruction 10066 10067.LstackOverflow: @ r0=methodToCall 10068 mov r1, r0 @ r1<- methodToCall 10069 ldr r0, [rGLUE, #offGlue_self] @ r0<- self 10070 bl dvmHandleStackOverflow 10071 b common_exceptionThrown 10072#ifdef ASSIST_DEBUGGER 10073 .fnend 10074#endif 10075 10076 10077 /* 10078 * Common code for method invocation, calling through "glue code". 10079 * 10080 * TODO: now that we have range and non-range invoke handlers, this 10081 * needs to be split into two. Maybe just create entry points 10082 * that set r9 and jump here? 10083 * 10084 * On entry: 10085 * r0 is "Method* methodToCall", the method we're trying to call 10086 * r9 is "bool methodCallRange", indicating if this is a /range variant 10087 */ 10088 .if 0 10089.LinvokeOld: 10090 sub sp, sp, #8 @ space for args + pad 10091 FETCH(ip, 2) @ ip<- FEDC or CCCC 10092 mov r2, r0 @ A2<- methodToCall 10093 mov r0, rGLUE @ A0<- glue 10094 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 10095 mov r1, r9 @ A1<- methodCallRange 10096 mov r3, rINST, lsr #8 @ A3<- AA 10097 str ip, [sp, #0] @ A4<- ip 10098 bl dvmMterp_invokeMethod @ call the C invokeMethod 10099 add sp, sp, #8 @ remove arg area 10100 b common_resumeAfterGlueCall @ continue to next instruction 10101 .endif 10102 10103 10104 10105/* 10106 * Common code for handling a return instruction. 10107 * 10108 * This does not return. 
10109 */ 10110common_returnFromMethod: 10111.LreturnNew: 10112 mov r0, #kInterpEntryReturn 10113 mov r9, #0 10114 bl common_periodicChecks 10115 10116 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) 10117 ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame 10118 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc 10119 ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)] 10120 @ r2<- method we're returning to 10121 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10122 cmp r2, #0 @ is this a break frame? 10123 ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz 10124 mov r1, #0 @ "want switch" = false 10125 beq common_gotoBail @ break frame, bail out completely 10126 10127 PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST 10128 str r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method 10129 ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex 10130 str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp 10131#if defined(WITH_JIT) 10132 ldr r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr 10133 GET_JIT_PROF_TABLE(r0) 10134 mov rPC, r9 @ publish new rPC 10135 str r1, [rGLUE, #offGlue_methodClassDex] 10136 str r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land 10137 cmp r10, #0 @ caller is compiled code 10138 blxne r10 10139 GET_INST_OPCODE(ip) @ extract opcode from rINST 10140 cmp r0,#0 10141 bne common_updateProfile 10142 GOTO_OPCODE(ip) @ jump to next instruction 10143#else 10144 GET_INST_OPCODE(ip) @ extract opcode from rINST 10145 mov rPC, r9 @ publish new rPC 10146 str r1, [rGLUE, #offGlue_methodClassDex] 10147 GOTO_OPCODE(ip) @ jump to next instruction 10148#endif 10149 10150 /* 10151 * Return handling, calls through "glue code". 
10152 */ 10153 .if 0 10154.LreturnOld: 10155 SAVE_PC_FP_TO_GLUE() @ export state 10156 mov r0, rGLUE @ arg to function 10157 bl dvmMterp_returnFromMethod 10158 b common_resumeAfterGlueCall 10159 .endif 10160 10161 10162/* 10163 * Somebody has thrown an exception. Handle it. 10164 * 10165 * If the exception processing code returns to us (instead of falling 10166 * out of the interpreter), continue with whatever the next instruction 10167 * now happens to be. 10168 * 10169 * This does not return. 10170 */ 10171 .global dvmMterpCommonExceptionThrown 10172dvmMterpCommonExceptionThrown: 10173common_exceptionThrown: 10174.LexceptionNew: 10175 mov r0, #kInterpEntryThrow 10176 mov r9, #0 10177 bl common_periodicChecks 10178 10179 ldr r10, [rGLUE, #offGlue_self] @ r10<- glue->self 10180 ldr r9, [r10, #offThread_exception] @ r9<- self->exception 10181 mov r1, r10 @ r1<- self 10182 mov r0, r9 @ r0<- exception 10183 bl dvmAddTrackedAlloc @ don't let the exception be GCed 10184 mov r3, #0 @ r3<- NULL 10185 str r3, [r10, #offThread_exception] @ self->exception = NULL 10186 10187 /* set up args and a local for "&fp" */ 10188 /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */ 10189 str rFP, [sp, #-4]! @ *--sp = fp 10190 mov ip, sp @ ip<- &fp 10191 mov r3, #0 @ r3<- false 10192 str ip, [sp, #-4]! @ *--sp = &fp 10193 ldr r1, [rGLUE, #offGlue_method] @ r1<- glue->method 10194 mov r0, r10 @ r0<- self 10195 ldr r1, [r1, #offMethod_insns] @ r1<- method->insns 10196 mov r2, r9 @ r2<- exception 10197 sub r1, rPC, r1 @ r1<- pc - method->insns 10198 mov r1, r1, asr #1 @ r1<- offset in code units 10199 10200 /* call, r0 gets catchRelPc (a code-unit offset) */ 10201 bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp) 10202 10203 /* fix earlier stack overflow if necessary; may trash rFP */ 10204 ldrb r1, [r10, #offThread_stackOverflowed] 10205 cmp r1, #0 @ did we overflow earlier? 
10206 beq 1f @ no, skip ahead 10207 mov rFP, r0 @ save relPc result in rFP 10208 mov r0, r10 @ r0<- self 10209 mov r1, r9 @ r1<- exception 10210 bl dvmCleanupStackOverflow @ call(self) 10211 mov r0, rFP @ restore result 102121: 10213 10214 /* update frame pointer and check result from dvmFindCatchBlock */ 10215 ldr rFP, [sp, #4] @ retrieve the updated rFP 10216 cmp r0, #0 @ is catchRelPc < 0? 10217 add sp, sp, #8 @ restore stack 10218 bmi .LnotCaughtLocally 10219 10220 /* adjust locals to match self->curFrame and updated PC */ 10221 SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area 10222 ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method 10223 str r1, [rGLUE, #offGlue_method] @ glue->method = new method 10224 ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz 10225 ldr r3, [r1, #offMethod_insns] @ r3<- method->insns 10226 ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex 10227 add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc 10228 str r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth... 10229 10230 /* release the tracked alloc on the exception */ 10231 mov r0, r9 @ r0<- exception 10232 mov r1, r10 @ r1<- self 10233 bl dvmReleaseTrackedAlloc @ release the exception 10234 10235 /* restore the exception if the handler wants it */ 10236 FETCH_INST() @ load rINST from rPC 10237 GET_INST_OPCODE(ip) @ extract opcode from rINST 10238 cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"? 10239 streq r9, [r10, #offThread_exception] @ yes, restore the exception 10240 GOTO_OPCODE(ip) @ jump to next instruction 10241 10242.LnotCaughtLocally: @ r9=exception, r10=self 10243 /* fix stack overflow if necessary */ 10244 ldrb r1, [r10, #offThread_stackOverflowed] 10245 cmp r1, #0 @ did we overflow earlier? 
10246 movne r0, r10 @ if yes: r0<- self 10247 movne r1, r9 @ if yes: r1<- exception 10248 blne dvmCleanupStackOverflow @ if yes: call(self) 10249 10250 @ may want to show "not caught locally" debug messages here 10251#if DVM_SHOW_EXCEPTION >= 2 10252 /* call __android_log_print(prio, tag, format, ...) */ 10253 /* "Exception %s from %s:%d not caught locally" */ 10254 @ dvmLineNumFromPC(method, pc - method->insns) 10255 ldr r0, [rGLUE, #offGlue_method] 10256 ldr r1, [r0, #offMethod_insns] 10257 sub r1, rPC, r1 10258 asr r1, r1, #1 10259 bl dvmLineNumFromPC 10260 str r0, [sp, #-4]! 10261 @ dvmGetMethodSourceFile(method) 10262 ldr r0, [rGLUE, #offGlue_method] 10263 bl dvmGetMethodSourceFile 10264 str r0, [sp, #-4]! 10265 @ exception->clazz->descriptor 10266 ldr r3, [r9, #offObject_clazz] 10267 ldr r3, [r3, #offClassObject_descriptor] 10268 @ 10269 ldr r2, strExceptionNotCaughtLocally 10270 ldr r1, strLogTag 10271 mov r0, #3 @ LOG_DEBUG 10272 bl __android_log_print 10273#endif 10274 str r9, [r10, #offThread_exception] @ restore exception 10275 mov r0, r9 @ r0<- exception 10276 mov r1, r10 @ r1<- self 10277 bl dvmReleaseTrackedAlloc @ release the exception 10278 mov r1, #0 @ "want switch" = false 10279 b common_gotoBail @ bail out 10280 10281 10282 /* 10283 * Exception handling, calls through "glue code". 10284 */ 10285 .if 0 10286.LexceptionOld: 10287 SAVE_PC_FP_TO_GLUE() @ export state 10288 mov r0, rGLUE @ arg to function 10289 bl dvmMterp_exceptionThrown 10290 b common_resumeAfterGlueCall 10291 .endif 10292 10293 10294/* 10295 * After returning from a "glued" function, pull out the updated 10296 * values and start executing at the next instruction. 10297 */ 10298common_resumeAfterGlueCall: 10299 LOAD_PC_FP_FROM_GLUE() @ pull rPC and rFP out of glue 10300 FETCH_INST() @ load rINST from rPC 10301 GET_INST_OPCODE(ip) @ extract opcode from rINST 10302 GOTO_OPCODE(ip) @ jump to next instruction 10303 10304/* 10305 * Invalid array index. 
10306 */ 10307common_errArrayIndex: 10308 EXPORT_PC() 10309 ldr r0, strArrayIndexException 10310 mov r1, #0 10311 bl dvmThrowException 10312 b common_exceptionThrown 10313 10314/* 10315 * Invalid array value. 10316 */ 10317common_errArrayStore: 10318 EXPORT_PC() 10319 ldr r0, strArrayStoreException 10320 mov r1, #0 10321 bl dvmThrowException 10322 b common_exceptionThrown 10323 10324/* 10325 * Integer divide or mod by zero. 10326 */ 10327common_errDivideByZero: 10328 EXPORT_PC() 10329 ldr r0, strArithmeticException 10330 ldr r1, strDivideByZero 10331 bl dvmThrowException 10332 b common_exceptionThrown 10333 10334/* 10335 * Attempt to allocate an array with a negative size. 10336 */ 10337common_errNegativeArraySize: 10338 EXPORT_PC() 10339 ldr r0, strNegativeArraySizeException 10340 mov r1, #0 10341 bl dvmThrowException 10342 b common_exceptionThrown 10343 10344/* 10345 * Invocation of a non-existent method. 10346 */ 10347common_errNoSuchMethod: 10348 EXPORT_PC() 10349 ldr r0, strNoSuchMethodError 10350 mov r1, #0 10351 bl dvmThrowException 10352 b common_exceptionThrown 10353 10354/* 10355 * We encountered a null object when we weren't expecting one. We 10356 * export the PC, throw a NullPointerException, and goto the exception 10357 * processing code. 10358 */ 10359common_errNullObject: 10360 EXPORT_PC() 10361 ldr r0, strNullPointerException 10362 mov r1, #0 10363 bl dvmThrowException 10364 b common_exceptionThrown 10365 10366/* 10367 * For debugging, cause an immediate fault. The source address will 10368 * be in lr (use a bl instruction to jump here). 10369 */ 10370common_abort: 10371 ldr pc, .LdeadFood 10372.LdeadFood: 10373 .word 0xdeadf00d 10374 10375/* 10376 * Spit out a "we were here", preserving all registers. (The attempt 10377 * to save ip won't work, but we need to save an even number of 10378 * registers for EABI 64-bit stack alignment.) 
10379 */ 10380 .macro SQUEAK num 10381common_squeak\num: 10382 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10383 ldr r0, strSqueak 10384 mov r1, #\num 10385 bl printf 10386 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10387 bx lr 10388 .endm 10389 10390 SQUEAK 0 10391 SQUEAK 1 10392 SQUEAK 2 10393 SQUEAK 3 10394 SQUEAK 4 10395 SQUEAK 5 10396 10397/* 10398 * Spit out the number in r0, preserving registers. 10399 */ 10400common_printNum: 10401 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10402 mov r1, r0 10403 ldr r0, strSqueak 10404 bl printf 10405 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10406 bx lr 10407 10408/* 10409 * Print a newline, preserving registers. 10410 */ 10411common_printNewline: 10412 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10413 ldr r0, strNewline 10414 bl printf 10415 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10416 bx lr 10417 10418 /* 10419 * Print the 32-bit quantity in r0 as a hex value, preserving registers. 10420 */ 10421common_printHex: 10422 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10423 mov r1, r0 10424 ldr r0, strPrintHex 10425 bl printf 10426 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10427 bx lr 10428 10429/* 10430 * Print the 64-bit quantity in r0-r1, preserving registers. 10431 */ 10432common_printLong: 10433 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10434 mov r3, r1 10435 mov r2, r0 10436 ldr r0, strPrintLong 10437 bl printf 10438 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10439 bx lr 10440 10441/* 10442 * Print full method info. Pass the Method* in r0. Preserves regs. 10443 */ 10444common_printMethod: 10445 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10446 bl dvmMterpPrintMethod 10447 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10448 bx lr 10449 10450/* 10451 * Call a C helper function that dumps regs and possibly some 10452 * additional info. Requires the C function to be compiled in. 
10453 */ 10454 .if 0 10455common_dumpRegs: 10456 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10457 bl dvmMterpDumpArmRegs 10458 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10459 bx lr 10460 .endif 10461 10462#if 0 10463/* 10464 * Experiment on VFP mode. 10465 * 10466 * uint32_t setFPSCR(uint32_t val, uint32_t mask) 10467 * 10468 * Updates the bits specified by "mask", setting them to the values in "val". 10469 */ 10470setFPSCR: 10471 and r0, r0, r1 @ make sure no stray bits are set 10472 fmrx r2, fpscr @ get VFP reg 10473 mvn r1, r1 @ bit-invert mask 10474 and r2, r2, r1 @ clear masked bits 10475 orr r2, r2, r0 @ set specified bits 10476 fmxr fpscr, r2 @ set VFP reg 10477 mov r0, r2 @ return new value 10478 bx lr 10479 10480 .align 2 10481 .global dvmConfigureFP 10482 .type dvmConfigureFP, %function 10483dvmConfigureFP: 10484 stmfd sp!, {ip, lr} 10485 /* 0x03000000 sets DN/FZ */ 10486 /* 0x00009f00 clears the six exception enable flags */ 10487 bl common_squeak0 10488 mov r0, #0x03000000 @ r0<- 0x03000000 10489 add r1, r0, #0x9f00 @ r1<- 0x03009f00 10490 bl setFPSCR 10491 ldmfd sp!, {ip, pc} 10492#endif 10493 10494 10495/* 10496 * String references, must be close to the code that uses them. 
10497 */ 10498 .align 2 10499strArithmeticException: 10500 .word .LstrArithmeticException 10501strArrayIndexException: 10502 .word .LstrArrayIndexException 10503strArrayStoreException: 10504 .word .LstrArrayStoreException 10505strDivideByZero: 10506 .word .LstrDivideByZero 10507strNegativeArraySizeException: 10508 .word .LstrNegativeArraySizeException 10509strNoSuchMethodError: 10510 .word .LstrNoSuchMethodError 10511strNullPointerException: 10512 .word .LstrNullPointerException 10513 10514strLogTag: 10515 .word .LstrLogTag 10516strExceptionNotCaughtLocally: 10517 .word .LstrExceptionNotCaughtLocally 10518 10519strNewline: 10520 .word .LstrNewline 10521strSqueak: 10522 .word .LstrSqueak 10523strPrintHex: 10524 .word .LstrPrintHex 10525strPrintLong: 10526 .word .LstrPrintLong 10527 10528/* 10529 * Zero-terminated ASCII string data. 10530 * 10531 * On ARM we have two choices: do like gcc does, and LDR from a .word 10532 * with the address, or use an ADR pseudo-op to get the address 10533 * directly. ADR saves 4 bytes and an indirection, but it's using a 10534 * PC-relative addressing mode and hence has a limited range, which 10535 * makes it not work well with mergeable string sections. 
10536 */ 10537 .section .rodata.str1.4,"aMS",%progbits,1 10538 10539.LstrBadEntryPoint: 10540 .asciz "Bad entry point %d\n" 10541.LstrArithmeticException: 10542 .asciz "Ljava/lang/ArithmeticException;" 10543.LstrArrayIndexException: 10544 .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;" 10545.LstrArrayStoreException: 10546 .asciz "Ljava/lang/ArrayStoreException;" 10547.LstrClassCastException: 10548 .asciz "Ljava/lang/ClassCastException;" 10549.LstrDivideByZero: 10550 .asciz "divide by zero" 10551.LstrFilledNewArrayNotImpl: 10552 .asciz "filled-new-array only implemented for objects and 'int'" 10553.LstrInternalError: 10554 .asciz "Ljava/lang/InternalError;" 10555.LstrInstantiationError: 10556 .asciz "Ljava/lang/InstantiationError;" 10557.LstrNegativeArraySizeException: 10558 .asciz "Ljava/lang/NegativeArraySizeException;" 10559.LstrNoSuchMethodError: 10560 .asciz "Ljava/lang/NoSuchMethodError;" 10561.LstrNullPointerException: 10562 .asciz "Ljava/lang/NullPointerException;" 10563 10564.LstrLogTag: 10565 .asciz "mterp" 10566.LstrExceptionNotCaughtLocally: 10567 .asciz "Exception %s from %s:%d not caught locally\n" 10568 10569.LstrNewline: 10570 .asciz "\n" 10571.LstrSqueak: 10572 .asciz "<%d>" 10573.LstrPrintHex: 10574 .asciz "<0x%x>" 10575.LstrPrintLong: 10576 .asciz "<%lld>" 10577 10578