InterpAsm-armv5te-vfp.S revision 1df319e3674d993a07bc0ff1f56a5915410b5903
/*
 * This file was generated automatically by gen-mterp.py for 'armv5te-vfp'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-q3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 */
#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]!

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
    ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #(_count*2+_byte)]

/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 *
 * NOTE: this computed goto (rIBASE + opcode*64) is why every handler below
 * is emitted into a 64-byte slot via ".balign 64".
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

#if defined(WITH_JIT)
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
        add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"

#if defined(WITH_JIT)
#include "../common/jit-config.h"
#endif

/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm

/*
 * Macro for data memory barrier; not meaningful pre-ARMv6K.
 */
.macro  SMP_DMB
.endm

/*
 * Macro for data memory barrier; not meaningful pre-ARMv6K.
 */
.macro  SMP_DMB_ST
.endm

/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.  From this it figures out how to restore
 * the registers, including PC, for the previous stack frame.  If gdb
 * sees a non-function label, it stops scanning, so either we need to
 * have nothing but assembler-local labels between the entry point and
 * the break, or we need to fake it out.
 *
 * When this is defined, we add some stuff to make gdb less confused.
 */
#define ASSIST_DEBUGGER 1

    .text
    .align  2
    .global dvmMterpStdRun
    .type   dvmMterpStdRun, %function

/*
 * On entry:
 *  r0  MterpGlue* glue
 *
 * This function returns a boolean "changeInterp" value.  The return comes
 * via a call to dvmMterpStdBail().
 */
dvmMterpStdRun:
#define MTERP_ENTRY1 \
    .save {r4-r10,fp,lr}; \
    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
#define MTERP_ENTRY2 \
    .pad    #4; \
    sub     sp, sp, #4                  @ align 64

    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2

    /* save stack pointer, add magic word for debuggerd */
    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return

    /* set up "named" registers, figure out entry point */
    mov     rGLUE, r0                   @ set rGLUE
    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
    cmp     r1, #kInterpEntryInstr      @ usual case?
    bne     .Lnot_instr                 @ no, handle it

#if defined(WITH_JIT)
.LentryInstr:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    /* Entry is always a possible trace start */
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    mov     r1, #0                      @ prepare the value for the new state
    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
    cmp     r0,#0                       @ is profiling disabled?
#if !defined(WITH_SELF_VERIFICATION)
    bne     common_updateProfile        @ profiling is enabled
#else
    ldr     r2, [r10, #offThread_shadowSpace] @ to find out the jit exit state
    beq     1f                          @ profiling is disabled
    ldr     r3, [r2, #offShadowSpace_jitExitState] @ jit exit state
    cmp     r3, #kSVSTraceSelect        @ hot trace following?
    moveq   r2,#kJitTSelectRequestHot   @ ask for trace selection
    beq     common_selectTrace          @ go build the trace
    cmp     r3, #kSVSNoProfile          @ don't profile the next instruction?
    beq     1f                          @ interpret the next instruction
    b       common_updateProfile        @ collect profiles
#endif
1:
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    /* start executing the instruction at rPC */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.Lnot_instr:
    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
    beq     common_returnFromMethod

.Lnot_return:
    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
    beq     common_exceptionThrown

#if defined(WITH_JIT)
.Lnot_throw:
    ldr     r10,[rGLUE, #offGlue_jitResumeNPC]
    ldr     r2,[rGLUE, #offGlue_jitResumeDPC]
    cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
    bne     .Lbad_arg
    cmp     rPC,r2
    bne     .LentryInstr                @ must have branched, don't resume
#if defined(WITH_SELF_VERIFICATION)
    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
    b       jitSVShadowRunStart         @ re-enter the translation after the
                                        @ single-stepped instruction
                                        @noreturn
#endif
    mov     r1, #kInterpEntryInstr
    str     r1, [rGLUE, #offGlue_entryPoint]
    bx      r10                         @ re-enter the translation
#endif

.Lbad_arg:
    ldr     r0, strBadEntryPoint
    @ r1 holds value of entryPoint
    bl      printf
    bl      dvmAbort
    .fnend


    .global dvmMterpStdBail
    .type   dvmMterpStdBail, %function

/*
 * Restore the stack pointer and PC from the save point established on entry.
 * This is essentially the same as a longjmp, but should be cheaper.  The
 * last instruction causes us to return to whoever called dvmMterpStdRun.
 *
 * We pushed some registers on the stack in dvmMterpStdRun, then saved
 * SP and LR.  Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  r0  MterpGlue* glue
 *  r1  bool changeInterp
 */
dvmMterpStdBail:
    ldr     sp, [r0, #offGlue_bailPtr]  @ sp<- saved SP
    mov     r0, r1                      @ return the changeInterp value
    add     sp, sp, #4                  @ un-align 64
    LDMFD_PC "r4-r10,fp"                @ restore 9 regs and return


/*
 * String references.
 */
strBadEntryPoint:
    .word   .LstrBadEntryPoint


    .global dvmAsmInstructionStart
    .type   dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
    .text

/* ------------------------------ */
    .balign 64
.L_OP_NOP: /* 0x00 */
/* File: armv5te/OP_NOP.S */
    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    .type   dalvik_inst, %function
dalvik_inst:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
    .fnend
#endif

/* ------------------------------ */
    .balign 64
.L_OP_MOVE: /* 0x01 */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_FROM16: /* 0x02 */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_16: /* 0x03 */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE: /* 0x04 */
/* File: armv5te/OP_MOVE_WIDE.S */
    /* move-wide vA, vB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r2, r2, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
    /* move-wide/from16 vAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 1)                        @ r3<- BBBB
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_16: /* 0x06 */
/* File: armv5te/OP_MOVE_WIDE_16.S */
    /* move-wide/16 vAAAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 2)                        @ r3<- BBBB
    FETCH(r2, 1)                        @ r2<- AAAA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AAAA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT: /* 0x07 */
/* File: armv5te/OP_MOVE_OBJECT.S */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_16: /* 0x09 */
/* File: armv5te/OP_MOVE_OBJECT_16.S */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT: /* 0x0a */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
    /* move-result-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_EXCEPTION: /* 0x0d */
/* File: armv5te/OP_MOVE_EXCEPTION.S */
    /* move-exception vAA */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    mov     r2, rINST, lsr #8           @ r2<- AA
    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
    mov     r1, #0                      @ r1<- 0
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_VOID: /* 0x0e */
/* File: armv5te/OP_RETURN_VOID.S */
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN: /* 0x0f */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_WIDE: /* 0x10 */
/* File: armv5te/OP_RETURN_WIDE.S */
    /*
     * Return a 64-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     */
    /* return-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
    stmia   r3, {r0-r1}                 @ retval<- r0/r1
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_OBJECT: /* 0x11 */
/* File: armv5te/OP_RETURN_OBJECT.S */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_CONST_4: /* 0x12 */
/* File: armv5te/OP_CONST_4.S */
    /* const/4 vA, #+B */
    mov     r1, rINST, lsl #16          @ r1<- Bxxx0000
    mov     r0, rINST, lsr #8           @ r0<- A+
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r1, asr #28             @ r1<- sssssssB (sign-extended)
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r1, r0)                    @ fp[A]<- r1
    GOTO_OPCODE(ip)                     @ execute next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_16: /* 0x13 */
/* File: armv5te/OP_CONST_16.S */
    /* const/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST: /* 0x14 */
/* File: armv5te/OP_CONST.S */
    /* const vAA, #+BBBBbbbb */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_HIGH16: /* 0x15 */
/* File: armv5te/OP_CONST_HIGH16.S */
    /* const/high16 vAA, #+BBBB0000 */
    FETCH(r0, 1)                        @ r0<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, r0, lsl #16             @ r0<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_16: /* 0x16 */
/* File: armv5te/OP_CONST_WIDE_16.S */
    /* const-wide/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r1, r0, asr #31             @ r1<- ssssssss
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_32: /* 0x17 */
/* File: armv5te/OP_CONST_WIDE_32.S */
    /* const-wide/32 vAA, #+BBBBbbbb */
    FETCH(r0, 1)                        @ r0<- 0000bbbb (low)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_S(r2, 2)                      @ r2<- ssssBBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    mov     r1, r0, asr #31             @ r1<- ssssssss
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE: /* 0x18 */
/* File: armv5te/OP_CONST_WIDE.S */
    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (low middle)
    FETCH(r2, 3)                        @ r2<- hhhh (high middle)
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
    FETCH(r3, 4)                        @ r3<- HHHH (high)
    mov     r9, rINST, lsr #8           @ r9<- AA
    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
.balign 64 804.L_OP_CONST_WIDE_HIGH16: /* 0x19 */ 805/* File: armv5te/OP_CONST_WIDE_HIGH16.S */ 806 /* const-wide/high16 vAA, #+BBBB000000000000 */ 807 FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended) 808 mov r3, rINST, lsr #8 @ r3<- AA 809 mov r0, #0 @ r0<- 00000000 810 mov r1, r1, lsl #16 @ r1<- BBBB0000 811 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 812 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 813 GET_INST_OPCODE(ip) @ extract opcode from rINST 814 stmia r3, {r0-r1} @ vAA<- r0/r1 815 GOTO_OPCODE(ip) @ jump to next instruction 816 817/* ------------------------------ */ 818 .balign 64 819.L_OP_CONST_STRING: /* 0x1a */ 820/* File: armv5te/OP_CONST_STRING.S */ 821 /* const/string vAA, String@BBBB */ 822 FETCH(r1, 1) @ r1<- BBBB 823 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 824 mov r9, rINST, lsr #8 @ r9<- AA 825 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 826 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 827 cmp r0, #0 @ not yet resolved? 
828 beq .LOP_CONST_STRING_resolve 829 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 830 GET_INST_OPCODE(ip) @ extract opcode from rINST 831 SET_VREG(r0, r9) @ vAA<- r0 832 GOTO_OPCODE(ip) @ jump to next instruction 833 834/* ------------------------------ */ 835 .balign 64 836.L_OP_CONST_STRING_JUMBO: /* 0x1b */ 837/* File: armv5te/OP_CONST_STRING_JUMBO.S */ 838 /* const/string vAA, String@BBBBBBBB */ 839 FETCH(r0, 1) @ r0<- bbbb (low) 840 FETCH(r1, 2) @ r1<- BBBB (high) 841 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 842 mov r9, rINST, lsr #8 @ r9<- AA 843 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 844 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 845 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 846 cmp r0, #0 847 beq .LOP_CONST_STRING_JUMBO_resolve 848 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 849 GET_INST_OPCODE(ip) @ extract opcode from rINST 850 SET_VREG(r0, r9) @ vAA<- r0 851 GOTO_OPCODE(ip) @ jump to next instruction 852 853/* ------------------------------ */ 854 .balign 64 855.L_OP_CONST_CLASS: /* 0x1c */ 856/* File: armv5te/OP_CONST_CLASS.S */ 857 /* const/class vAA, Class@BBBB */ 858 FETCH(r1, 1) @ r1<- BBBB 859 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 860 mov r9, rINST, lsr #8 @ r9<- AA 861 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses 862 ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB] 863 cmp r0, #0 @ not yet resolved? 864 beq .LOP_CONST_CLASS_resolve 865 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 866 GET_INST_OPCODE(ip) @ extract opcode from rINST 867 SET_VREG(r0, r9) @ vAA<- r0 868 GOTO_OPCODE(ip) @ jump to next instruction 869 870/* ------------------------------ */ 871 .balign 64 872.L_OP_MONITOR_ENTER: /* 0x1d */ 873/* File: armv5te/OP_MONITOR_ENTER.S */ 874 /* 875 * Synchronize on an object. 
876 */ 877 /* monitor-enter vAA */ 878 mov r2, rINST, lsr #8 @ r2<- AA 879 GET_VREG(r1, r2) @ r1<- vAA (object) 880 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 881 cmp r1, #0 @ null object? 882 EXPORT_PC() @ need for precise GC, MONITOR_TRACKING 883 beq common_errNullObject @ null object, throw an exception 884 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 885 bl dvmLockObject @ call(self, obj) 886#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */ 887 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 888 ldr r1, [r0, #offThread_exception] @ check for exception 889 cmp r1, #0 890 bne common_exceptionThrown @ exception raised, bail out 891#endif 892 GET_INST_OPCODE(ip) @ extract opcode from rINST 893 GOTO_OPCODE(ip) @ jump to next instruction 894 895/* ------------------------------ */ 896 .balign 64 897.L_OP_MONITOR_EXIT: /* 0x1e */ 898/* File: armv5te/OP_MONITOR_EXIT.S */ 899 /* 900 * Unlock an object. 901 * 902 * Exceptions that occur when unlocking a monitor need to appear as 903 * if they happened at the following instruction. See the Dalvik 904 * instruction spec. 905 */ 906 /* monitor-exit vAA */ 907 mov r2, rINST, lsr #8 @ r2<- AA 908 EXPORT_PC() @ before fetch: export the PC 909 GET_VREG(r1, r2) @ r1<- vAA (object) 910 cmp r1, #0 @ null object? 911 beq 1f @ yes 912 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 913 bl dvmUnlockObject @ r0<- success for unlock(self, obj) 914 cmp r0, #0 @ failed? 915 FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST 916 beq common_exceptionThrown @ yes, exception is pending 917 GET_INST_OPCODE(ip) @ extract opcode from rINST 918 GOTO_OPCODE(ip) @ jump to next instruction 9191: 920 FETCH_ADVANCE_INST(1) @ advance before throw 921 b common_errNullObject 922 923/* ------------------------------ */ 924 .balign 64 925.L_OP_CHECK_CAST: /* 0x1f */ 926/* File: armv5te/OP_CHECK_CAST.S */ 927 /* 928 * Check to see if a cast from one class to another is allowed. 
     */
    /* check-cast vAA, class@BBBB */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r2, 1)                        @ r2<- BBBB
    GET_VREG(r9, r3)                    @ r9<- object
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
    cmp     r9, #0                      @ is object null?
    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
    beq     .LOP_CHECK_CAST_okay        @ null obj, cast always succeeds
    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_CHECK_CAST_resolve     @ not resolved, do it now
.LOP_CHECK_CAST_resolved:
    cmp     r0, r1                      @ same class (trivial success)?
    bne     .LOP_CHECK_CAST_fullcheck   @ no, do full check
.LOP_CHECK_CAST_okay:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INSTANCE_OF: /* 0x20 */
/* File: armv5te/OP_INSTANCE_OF.S */
    /*
     * Check to see if an object reference is an instance of a class.
     *
     * Most common situation is a non-null object, being compared against
     * an already-resolved class.
     */
    /* instance-of vA, vB, class@CCCC */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    and     r9, r9, #15                 @ r9<- A
    cmp     r0, #0                      @ is object null?
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
    beq     .LOP_INSTANCE_OF_store      @ null obj, not an instance, store r0
    FETCH(r3, 1)                        @ r3<- CCCC
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_INSTANCE_OF_resolve    @ not resolved, do it now
.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
    cmp     r0, r1                      @ same class (trivial success)?
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

/* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv5te/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    mov     r1, rINST, lsr #12          @ r1<- B
    mov     r2, rINST, lsr #8           @ r2<- A+
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    and     r2, r2, #15                 @ r2<- A
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vB<- length
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved: @ r0=class
    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

/* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


/* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /* fill-array-data vAA, +BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
    EXPORT_PC();
    bl      dvmInterpHandleFillArrayData @ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ exception handler can throw
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    mov     r9, r9, lsl #1              @ r9<- byte offset (N flag still from movs)
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: arm-vfp/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default; also covers NaN)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_FLOAT_finish      @ argh

/* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: arm-vfp/OP_CMPG_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default; also covers NaN)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_FLOAT_finish      @ argh

/* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: arm-vfp/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default; also covers NaN)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_DOUBLE_finish     @ argh

/* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: arm-vfp/OP_CMPG_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default; also covers NaN)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_DOUBLE_finish     @ argh

/* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

/* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv5te/OP_IF_EQ.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv5te/OP_IF_NE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: armv5te/OP_IF_LT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: armv5te/OP_IF_GE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: armv5te/OP_IF_GT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_LE: /* 0x37 */
/* File: armv5te/OP_IF_LE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bgt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_EQZ: /* 0x38 */
/* File: armv5te/OP_IF_EQZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_NEZ: /* 0x39 */
/* File: armv5te/OP_IF_NEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_LTZ: /* 0x3a */
/* File: armv5te/OP_IF_LTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_GEZ: /* 0x3b */
/* File: armv5te/OP_IF_GEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_GTZ: /* 0x3c */
/* File: armv5te/OP_IF_GTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_IF_LEZ: /* 0x3d */
/* File: armv5te/OP_IF_LEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
1908 * 1909 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1910 */ 1911 /* if-cmp vAA, +BBBB */ 1912 mov r0, rINST, lsr #8 @ r0<- AA 1913 GET_VREG(r2, r0) @ r2<- vAA 1914 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1915 cmp r2, #0 @ compare (vA, 0) 1916 bgt 1f @ branch to 1 if comparison failed 1917 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1918 movs r9, r9, asl #1 @ convert to bytes, check sign 1919 bmi common_backwardBranch @ backward branch, do periodic checks 19201: 1921#if defined(WITH_JIT) 1922 GET_JIT_PROF_TABLE(r0) 1923 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1924 cmp r0,#0 1925 bne common_updateProfile 1926 GET_INST_OPCODE(ip) @ extract opcode from rINST 1927 GOTO_OPCODE(ip) @ jump to next instruction 1928#else 1929 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1930 GET_INST_OPCODE(ip) @ extract opcode from rINST 1931 GOTO_OPCODE(ip) @ jump to next instruction 1932#endif 1933 1934 1935/* ------------------------------ */ 1936 .balign 64 1937.L_OP_UNUSED_3E: /* 0x3e */ 1938/* File: armv5te/OP_UNUSED_3E.S */ 1939/* File: armv5te/unused.S */ 1940 bl common_abort 1941 1942 1943/* ------------------------------ */ 1944 .balign 64 1945.L_OP_UNUSED_3F: /* 0x3f */ 1946/* File: armv5te/OP_UNUSED_3F.S */ 1947/* File: armv5te/unused.S */ 1948 bl common_abort 1949 1950 1951/* ------------------------------ */ 1952 .balign 64 1953.L_OP_UNUSED_40: /* 0x40 */ 1954/* File: armv5te/OP_UNUSED_40.S */ 1955/* File: armv5te/unused.S */ 1956 bl common_abort 1957 1958 1959/* ------------------------------ */ 1960 .balign 64 1961.L_OP_UNUSED_41: /* 0x41 */ 1962/* File: armv5te/OP_UNUSED_41.S */ 1963/* File: armv5te/unused.S */ 1964 bl common_abort 1965 1966 1967/* ------------------------------ */ 1968 .balign 64 1969.L_OP_UNUSED_42: /* 0x42 */ 1970/* File: armv5te/OP_UNUSED_42.S */ 1971/* File: armv5te/unused.S */ 1972 bl common_abort 1973 1974 1975/* ------------------------------ */ 1976 .balign 64 1977.L_OP_UNUSED_43: /* 0x43 */ 1978/* 
 File: armv5te/OP_UNUSED_43.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_AGET: /* 0x44 */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less. vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: armv5te/OP_AGET_WIDE.S */
    /*
     * Array get, 64 bits. vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .LOP_AGET_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_AGET_OBJECT: /* 0x46 */
/* File: armv5te/OP_AGET_OBJECT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less. vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BOOLEAN: /* 0x47 */
/* File: armv5te/OP_AGET_BOOLEAN.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less. vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrb    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BYTE: /* 0x48 */
/* File: armv5te/OP_AGET_BYTE.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less. vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_CHAR: /* 0x49 */
/* File: armv5te/OP_AGET_CHAR.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less. vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrh    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_SHORT: /* 0x4a */
/* File: armv5te/OP_AGET_SHORT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less. vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT: /* 0x4b */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less. vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_APUT_WIDE: /* 0x4c */
/* File: armv5te/OP_APUT_WIDE.S */
    /*
     * Array put, 64 bits. vBB[vCC] <- vAA.
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
     */
    /* aput-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    bcc     .LOP_APUT_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_APUT_OBJECT: /* 0x4d */
/* File: armv5te/OP_APUT_OBJECT.S */
    /*
     * Store an object into an array. vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r1, r2)                    @ r1<- vBB (array object)
    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
    cmp     r1, #0                      @ null array object?
    GET_VREG(r9, r9)                    @ r9<- vAA
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r1, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r10, r1, r0, lsl #2         @ r10<- arrayObj + index*width
    cmp     r0, r3                      @ compare unsigned index, length
    bcc     .LOP_APUT_OBJECT_finish     @ we're okay, continue on
    b       common_errArrayIndex        @ index >= length, bail


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BOOLEAN: /* 0x4e */
/* File: armv5te/OP_APUT_BOOLEAN.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less. vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BYTE: /* 0x4f */
/* File: armv5te/OP_APUT_BYTE.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less. vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_CHAR: /* 0x50 */
/* File: armv5te/OP_APUT_CHAR.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less. vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_SHORT: /* 0x51 */
/* File: armv5te/OP_APUT_SHORT.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less. vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions. We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET: /* 0x52 */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: armv5te/OP_IGET_WIDE.S */
    /*
     * Wide 64-bit instance field get.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_WIDE_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: armv5te/OP_IGET_OBJECT.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_OBJECT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: armv5te/OP_IGET_BOOLEAN.S */
@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BOOLEAN_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BYTE: /* 0x56 */
/* File: armv5te/OP_IGET_BYTE.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BYTE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_CHAR: /* 0x57 */
/* File: armv5te/OP_IGET_CHAR.S */
@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_CHAR_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_SHORT: /* 0x58 */
/* File: armv5te/OP_IGET_SHORT.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_SHORT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT: /* 0x59 */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
2613 * 2614 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2615 */ 2616 /* op vA, vB, field@CCCC */ 2617 mov r0, rINST, lsr #12 @ r0<- B 2618 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2619 FETCH(r1, 1) @ r1<- field ref CCCC 2620 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2621 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2622 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2623 cmp r0, #0 @ is resolved entry null? 2624 bne .LOP_IPUT_finish @ no, already resolved 26258: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2626 EXPORT_PC() @ resolve() could throw 2627 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2628 bl dvmResolveInstField @ r0<- resolved InstField ptr 2629 cmp r0, #0 @ success? 2630 bne .LOP_IPUT_finish @ yes, finish up 2631 b common_exceptionThrown 2632 2633/* ------------------------------ */ 2634 .balign 64 2635.L_OP_IPUT_WIDE: /* 0x5a */ 2636/* File: armv5te/OP_IPUT_WIDE.S */ 2637 /* iput-wide vA, vB, field@CCCC */ 2638 mov r0, rINST, lsr #12 @ r0<- B 2639 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2640 FETCH(r1, 1) @ r1<- field ref CCCC 2641 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2642 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2643 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2644 cmp r0, #0 @ is resolved entry null? 2645 bne .LOP_IPUT_WIDE_finish @ no, already resolved 26468: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2647 EXPORT_PC() @ resolve() could throw 2648 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2649 bl dvmResolveInstField @ r0<- resolved InstField ptr 2650 cmp r0, #0 @ success? 2651 bne .LOP_IPUT_WIDE_finish @ yes, finish up 2652 b common_exceptionThrown 2653 2654/* ------------------------------ */ 2655 .balign 64 2656.L_OP_IPUT_OBJECT: /* 0x5b */ 2657/* File: armv5te/OP_IPUT_OBJECT.S */ 2658 /* 2659 * 32-bit instance field put. 
2660 * 2661 * for: iput-object, iput-object-volatile 2662 */ 2663 /* op vA, vB, field@CCCC */ 2664 mov r0, rINST, lsr #12 @ r0<- B 2665 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2666 FETCH(r1, 1) @ r1<- field ref CCCC 2667 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2668 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2669 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2670 cmp r0, #0 @ is resolved entry null? 2671 bne .LOP_IPUT_OBJECT_finish @ no, already resolved 26728: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2673 EXPORT_PC() @ resolve() could throw 2674 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2675 bl dvmResolveInstField @ r0<- resolved InstField ptr 2676 cmp r0, #0 @ success? 2677 bne .LOP_IPUT_OBJECT_finish @ yes, finish up 2678 b common_exceptionThrown 2679 2680/* ------------------------------ */ 2681 .balign 64 2682.L_OP_IPUT_BOOLEAN: /* 0x5c */ 2683/* File: armv5te/OP_IPUT_BOOLEAN.S */ 2684@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" } 2685/* File: armv5te/OP_IPUT.S */ 2686 /* 2687 * General 32-bit instance field put. 2688 * 2689 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2690 */ 2691 /* op vA, vB, field@CCCC */ 2692 mov r0, rINST, lsr #12 @ r0<- B 2693 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2694 FETCH(r1, 1) @ r1<- field ref CCCC 2695 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2696 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2697 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2698 cmp r0, #0 @ is resolved entry null? 2699 bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved 27008: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2701 EXPORT_PC() @ resolve() could throw 2702 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2703 bl dvmResolveInstField @ r0<- resolved InstField ptr 2704 cmp r0, #0 @ success? 
    bne     .LOP_IPUT_BOOLEAN_finish    @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BYTE: /* 0x5d */
/* File: armv5te/OP_IPUT_BYTE.S */
@ (The "@include" line below is a generator annotation; "@" begins a
@ comment in ARM assembly, so it is not assembled.)
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BYTE_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_CHAR: /* 0x5e */
/* File: armv5te/OP_IPUT_CHAR.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_CHAR_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_SHORT: /* 0x5f */
/* File: armv5te/OP_IPUT_SHORT.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_SHORT_finish      @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET: /* 0x60 */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_resolve           @ yes, do resolve
.LOP_SGET_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    @ no-op                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_WIDE: /* 0x61 */
/* File: armv5te/OP_SGET_WIDE.S */
    /*
     * 64-bit SGET handler.
     */
    /* sget-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_WIDE_resolve      @ yes, do resolve
.LOP_SGET_WIDE_finish:
    mov     r9, rINST, lsr #8           @ r9<- AA
    @ The ".if 0" arm is the volatile-field (atomic read) path; the
    @ generator compiled it out for this non-volatile opcode.
    .if 0
    add     r0, r0, #offStaticField_value @ r0<- pointer to data
    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
    .else
    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
    .endif
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_OBJECT: /* 0x62 */
/* File: armv5te/OP_SGET_OBJECT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_OBJECT_resolve    @ yes, do resolve
.LOP_SGET_OBJECT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    @ no-op                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_BOOLEAN: /* 0x63 */
/* File: armv5te/OP_SGET_BOOLEAN.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    @ These handlers are all stamped out from the OP_SGET.S template (see
    @ the "File:" comments); only the label names differ per opcode.
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BOOLEAN_resolve   @ yes, do resolve
.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    @ no-op                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_BYTE: /* 0x64 */
/* File: armv5te/OP_SGET_BYTE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BYTE_resolve      @ yes, do resolve
.LOP_SGET_BYTE_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    @ no-op                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_CHAR: /* 0x65 */
/* File: armv5te/OP_SGET_CHAR.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_CHAR_resolve      @ yes, do resolve
.LOP_SGET_CHAR_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    @ no-op                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_SHORT: /* 0x66 */
/* File: armv5te/OP_SGET_SHORT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_SHORT_resolve     @ yes, do resolve
.LOP_SGET_SHORT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    @ no-op                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT: /* 0x67 */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_resolve           @ yes, do resolve
.LOP_SPUT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    @ no-op                             @ releasing store
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_WIDE: /* 0x68 */
/* File: armv5te/OP_SPUT_WIDE.S */
    /*
     * 64-bit SPUT handler.
     */
    /* sput-wide vAA, field@BBBB */
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    cmp     r2, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_WIDE_resolve      @ yes, do resolve
.LOP_SPUT_WIDE_finish: @ field ptr in r2, AA in r9
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    @ NOTE(review): opcode is kept in r10 rather than ip here, presumably
    @ so it survives the bl in the volatile arm below -- confirm in template.
    GET_INST_OPCODE(r10)                @ extract opcode from rINST
    @ ".if 0" = volatile (atomic swap) path, compiled out for this opcode.
    .if 0
    add     r2, r2, #offStaticField_value @ r2<- pointer to data
    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
    .else
    strd    r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1
    .endif
    GOTO_OPCODE(r10)                    @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_OBJECT: /* 0x69 */
/* File: armv5te/OP_SPUT_OBJECT.S */
    /*
     * 32-bit SPUT handler for objects
     *
     * for: sput-object, sput-object-volatile
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_SPUT_OBJECT_finish     @ no, continue
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_OBJECT_finish     @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BOOLEAN: /* 0x6a */
/* File: armv5te/OP_SPUT_BOOLEAN.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BOOLEAN_resolve   @ yes, do resolve
.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    @ no-op                             @ releasing store
    @ (store is issued after the next opcode is fetched; the "no-op" above
    @ is the barrier slot used by the volatile variant)
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BYTE: /* 0x6b */
/* File: armv5te/OP_SPUT_BYTE.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BYTE_resolve      @ yes, do resolve
.LOP_SPUT_BYTE_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    @ no-op                             @ releasing store
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_CHAR: /* 0x6c */
/* File: armv5te/OP_SPUT_CHAR.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_CHAR_resolve      @ yes, do resolve
.LOP_SPUT_CHAR_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    @ no-op                             @ releasing store
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_SHORT: /* 0x6d */
/* File: armv5te/OP_SPUT_SHORT.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_SHORT_resolve     @ yes, do resolve
.LOP_SPUT_SHORT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    @ no-op                             @ releasing store
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL: /* 0x6e */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    @ ".if (!0)": this is the non-range variant, so mask down to reg D.
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_continue    @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_continue    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER: /* 0x6f */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_continue  @ resolved, continue on
    b       .LOP_INVOKE_SUPER_resolve   @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT: /* 0x70 */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoid loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_resolve  @ not resolved, do it now
.LOP_INVOKE_DIRECT_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodNoRange  @ no, continue on
    b       common_errNullObject        @ yes, throw exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC: /* 0x71 */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodNoRange  @ yes, continue on
    @ Slow path: resolve the method (NOTE(review): the "0:" numeric label
    @ appears unreferenced in this view; generator artifact).
0:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodNoRange  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE: /* 0x72 */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!0)
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodNoRange  @ jump to common handler

/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_73: /* 0x73 */
/* File: armv5te/OP_UNUSED_73.S */
/* File: armv5te/unused.S */
    @ Unassigned opcode slot: abort the VM if it is ever executed.
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    @ ".if (!1)": range variant, so the D-register masking is compiled out
    @ and r10 keeps the full CCCC value.
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue  @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_RANGE_continue  @ resolved, continue on
    b       .LOP_INVOKE_SUPER_RANGE_resolve   @ do resolve now


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoid loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_RANGE_resolve  @ not resolved, do it now
.LOP_INVOKE_DIRECT_RANGE_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodRange    @ no, continue on
    b       common_errNullObject        @ yes, throw exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodRange    @ yes, continue on
0:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodRange    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!1)
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodRange    @ jump to common handler


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_79: /* 0x79 */
/* File: armv5te/OP_UNUSED_79.S */
/* File: armv5te/unused.S */
    @ Unassigned opcode slot: abort the VM if it is ever executed.
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_7A: /* 0x7a */
/* File: armv5te/OP_UNUSED_7A.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_NEG_INT: /* 0x7b */
/* File: armv5te/OP_NEG_INT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsb     r0, r0, #0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NOT_INT: /* 0x7c */
/* File: armv5te/OP_NOT_INT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_LONG: /* 0x7d */
/* File: armv5te/OP_NEG_LONG.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ 64-bit negate: rsbs sets carry for the rsc on the high word
    rsbs    r0, r0, #0                  @ optional op; may set condition codes
    rsc     r1, r1, #0                  @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NOT_LONG: /* 0x7e */
/* File: armv5te/OP_NOT_LONG.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ optional op; may set condition codes
    mvn     r1, r1                      @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_FLOAT: /* 0x7f */
/* File: armv5te/OP_NEG_FLOAT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ float negate by flipping the IEEE-754 sign bit
    add     r0, r0, #0x80000000         @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_DOUBLE: /* 0x80 */
/* File: armv5te/OP_NEG_DOUBLE.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
3642 * 3643 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3644 */ 3645 /* unop vA, vB */ 3646 mov r9, rINST, lsr #8 @ r9<- A+ 3647 mov r3, rINST, lsr #12 @ r3<- B 3648 and r9, r9, #15 3649 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3650 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3651 ldmia r3, {r0-r1} @ r0/r1<- vAA 3652 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3653 @ optional op; may set condition codes 3654 add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed 3655 GET_INST_OPCODE(ip) @ extract opcode from rINST 3656 stmia r9, {r0-r1} @ vAA<- r0/r1 3657 GOTO_OPCODE(ip) @ jump to next instruction 3658 /* 12-13 instructions */ 3659 3660 3661/* ------------------------------ */ 3662 .balign 64 3663.L_OP_INT_TO_LONG: /* 0x81 */ 3664/* File: armv5te/OP_INT_TO_LONG.S */ 3665/* File: armv5te/unopWider.S */ 3666 /* 3667 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3668 * that specifies an instruction that performs "result = op r0", where 3669 * "result" is a 64-bit quantity in r0/r1. 3670 * 3671 * For: int-to-long, int-to-double, float-to-long, float-to-double 3672 */ 3673 /* unop vA, vB */ 3674 mov r9, rINST, lsr #8 @ r9<- A+ 3675 mov r3, rINST, lsr #12 @ r3<- B 3676 and r9, r9, #15 3677 GET_VREG(r0, r3) @ r0<- vB 3678 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3679 @ optional op; may set condition codes 3680 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3681 mov r1, r0, asr #31 @ r0<- op, r0-r3 changed 3682 GET_INST_OPCODE(ip) @ extract opcode from rINST 3683 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3684 GOTO_OPCODE(ip) @ jump to next instruction 3685 /* 10-11 instructions */ 3686 3687 3688/* ------------------------------ */ 3689 .balign 64 3690.L_OP_INT_TO_FLOAT: /* 0x82 */ 3691/* File: arm-vfp/OP_INT_TO_FLOAT.S */ 3692/* File: arm-vfp/funop.S */ 3693 /* 3694 * Generic 32-bit unary floating-point operation. Provide an "instr" 3695 * line that specifies an instruction that performs "s1 = op s0". 
3696 * 3697 * for: int-to-float, float-to-int 3698 */ 3699 /* unop vA, vB */ 3700 mov r3, rINST, lsr #12 @ r3<- B 3701 mov r9, rINST, lsr #8 @ r9<- A+ 3702 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3703 flds s0, [r3] @ s0<- vB 3704 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3705 and r9, r9, #15 @ r9<- A 3706 fsitos s1, s0 @ s1<- op 3707 GET_INST_OPCODE(ip) @ extract opcode from rINST 3708 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3709 fsts s1, [r9] @ vA<- s1 3710 GOTO_OPCODE(ip) @ jump to next instruction 3711 3712 3713/* ------------------------------ */ 3714 .balign 64 3715.L_OP_INT_TO_DOUBLE: /* 0x83 */ 3716/* File: arm-vfp/OP_INT_TO_DOUBLE.S */ 3717/* File: arm-vfp/funopWider.S */ 3718 /* 3719 * Generic 32bit-to-64bit floating point unary operation. Provide an 3720 * "instr" line that specifies an instruction that performs "d0 = op s0". 3721 * 3722 * For: int-to-double, float-to-double 3723 */ 3724 /* unop vA, vB */ 3725 mov r3, rINST, lsr #12 @ r3<- B 3726 mov r9, rINST, lsr #8 @ r9<- A+ 3727 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3728 flds s0, [r3] @ s0<- vB 3729 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3730 and r9, r9, #15 @ r9<- A 3731 fsitod d0, s0 @ d0<- op 3732 GET_INST_OPCODE(ip) @ extract opcode from rINST 3733 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3734 fstd d0, [r9] @ vA<- d0 3735 GOTO_OPCODE(ip) @ jump to next instruction 3736 3737 3738/* ------------------------------ */ 3739 .balign 64 3740.L_OP_LONG_TO_INT: /* 0x84 */ 3741/* File: armv5te/OP_LONG_TO_INT.S */ 3742/* we ignore the high word, making this equivalent to a 32-bit reg move */ 3743/* File: armv5te/OP_MOVE.S */ 3744 /* for move, move-object, long-to-int */ 3745 /* op vA, vB */ 3746 mov r1, rINST, lsr #12 @ r1<- B from 15:12 3747 mov r0, rINST, lsr #8 @ r0<- A from 11:8 3748 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3749 GET_VREG(r2, r1) @ r2<- fp[B] 3750 and r0, r0, #15 3751 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 3752 SET_VREG(r2, r0) @ fp[A]<- r2 3753 GOTO_OPCODE(ip) @ 
execute next instruction 3754 3755 3756/* ------------------------------ */ 3757 .balign 64 3758.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3759/* File: armv5te/OP_LONG_TO_FLOAT.S */ 3760/* File: armv5te/unopNarrower.S */ 3761 /* 3762 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3763 * that specifies an instruction that performs "result = op r0/r1", where 3764 * "result" is a 32-bit quantity in r0. 3765 * 3766 * For: long-to-float, double-to-int, double-to-float 3767 * 3768 * (This would work for long-to-int, but that instruction is actually 3769 * an exact match for OP_MOVE.) 3770 */ 3771 /* unop vA, vB */ 3772 mov r3, rINST, lsr #12 @ r3<- B 3773 mov r9, rINST, lsr #8 @ r9<- A+ 3774 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3775 and r9, r9, #15 3776 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3777 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3778 @ optional op; may set condition codes 3779 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3780 GET_INST_OPCODE(ip) @ extract opcode from rINST 3781 SET_VREG(r0, r9) @ vA<- r0 3782 GOTO_OPCODE(ip) @ jump to next instruction 3783 /* 10-11 instructions */ 3784 3785 3786/* ------------------------------ */ 3787 .balign 64 3788.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3789/* File: armv5te/OP_LONG_TO_DOUBLE.S */ 3790/* File: armv5te/unopWide.S */ 3791 /* 3792 * Generic 64-bit unary operation. Provide an "instr" line that 3793 * specifies an instruction that performs "result = op r0/r1". 3794 * This could be an ARM instruction or a function call. 
3795 * 3796 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3797 */ 3798 /* unop vA, vB */ 3799 mov r9, rINST, lsr #8 @ r9<- A+ 3800 mov r3, rINST, lsr #12 @ r3<- B 3801 and r9, r9, #15 3802 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3803 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3804 ldmia r3, {r0-r1} @ r0/r1<- vAA 3805 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3806 @ optional op; may set condition codes 3807 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3808 GET_INST_OPCODE(ip) @ extract opcode from rINST 3809 stmia r9, {r0-r1} @ vAA<- r0/r1 3810 GOTO_OPCODE(ip) @ jump to next instruction 3811 /* 12-13 instructions */ 3812 3813 3814/* ------------------------------ */ 3815 .balign 64 3816.L_OP_FLOAT_TO_INT: /* 0x87 */ 3817/* File: arm-vfp/OP_FLOAT_TO_INT.S */ 3818/* File: arm-vfp/funop.S */ 3819 /* 3820 * Generic 32-bit unary floating-point operation. Provide an "instr" 3821 * line that specifies an instruction that performs "s1 = op s0". 3822 * 3823 * for: int-to-float, float-to-int 3824 */ 3825 /* unop vA, vB */ 3826 mov r3, rINST, lsr #12 @ r3<- B 3827 mov r9, rINST, lsr #8 @ r9<- A+ 3828 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3829 flds s0, [r3] @ s0<- vB 3830 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3831 and r9, r9, #15 @ r9<- A 3832 ftosizs s1, s0 @ s1<- op 3833 GET_INST_OPCODE(ip) @ extract opcode from rINST 3834 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3835 fsts s1, [r9] @ vA<- s1 3836 GOTO_OPCODE(ip) @ jump to next instruction 3837 3838 3839/* ------------------------------ */ 3840 .balign 64 3841.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3842/* File: armv5te/OP_FLOAT_TO_LONG.S */ 3843@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3844/* File: armv5te/unopWider.S */ 3845 /* 3846 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3847 * that specifies an instruction that performs "result = op r0", where 3848 * "result" is a 64-bit quantity in r0/r1. 
3849 * 3850 * For: int-to-long, int-to-double, float-to-long, float-to-double 3851 */ 3852 /* unop vA, vB */ 3853 mov r9, rINST, lsr #8 @ r9<- A+ 3854 mov r3, rINST, lsr #12 @ r3<- B 3855 and r9, r9, #15 3856 GET_VREG(r0, r3) @ r0<- vB 3857 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3858 @ optional op; may set condition codes 3859 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3860 bl f2l_doconv @ r0<- op, r0-r3 changed 3861 GET_INST_OPCODE(ip) @ extract opcode from rINST 3862 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3863 GOTO_OPCODE(ip) @ jump to next instruction 3864 /* 10-11 instructions */ 3865 3866 3867 3868/* ------------------------------ */ 3869 .balign 64 3870.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3871/* File: arm-vfp/OP_FLOAT_TO_DOUBLE.S */ 3872/* File: arm-vfp/funopWider.S */ 3873 /* 3874 * Generic 32bit-to-64bit floating point unary operation. Provide an 3875 * "instr" line that specifies an instruction that performs "d0 = op s0". 3876 * 3877 * For: int-to-double, float-to-double 3878 */ 3879 /* unop vA, vB */ 3880 mov r3, rINST, lsr #12 @ r3<- B 3881 mov r9, rINST, lsr #8 @ r9<- A+ 3882 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3883 flds s0, [r3] @ s0<- vB 3884 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3885 and r9, r9, #15 @ r9<- A 3886 fcvtds d0, s0 @ d0<- op 3887 GET_INST_OPCODE(ip) @ extract opcode from rINST 3888 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3889 fstd d0, [r9] @ vA<- d0 3890 GOTO_OPCODE(ip) @ jump to next instruction 3891 3892 3893/* ------------------------------ */ 3894 .balign 64 3895.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3896/* File: arm-vfp/OP_DOUBLE_TO_INT.S */ 3897/* File: arm-vfp/funopNarrower.S */ 3898 /* 3899 * Generic 64bit-to-32bit unary floating point operation. Provide an 3900 * "instr" line that specifies an instruction that performs "s0 = op d0". 
3901 * 3902 * For: double-to-int, double-to-float 3903 */ 3904 /* unop vA, vB */ 3905 mov r3, rINST, lsr #12 @ r3<- B 3906 mov r9, rINST, lsr #8 @ r9<- A+ 3907 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3908 fldd d0, [r3] @ d0<- vB 3909 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3910 and r9, r9, #15 @ r9<- A 3911 ftosizd s0, d0 @ s0<- op 3912 GET_INST_OPCODE(ip) @ extract opcode from rINST 3913 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3914 fsts s0, [r9] @ vA<- s0 3915 GOTO_OPCODE(ip) @ jump to next instruction 3916 3917 3918/* ------------------------------ */ 3919 .balign 64 3920.L_OP_DOUBLE_TO_LONG: /* 0x8b */ 3921/* File: armv5te/OP_DOUBLE_TO_LONG.S */ 3922@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"} 3923/* File: armv5te/unopWide.S */ 3924 /* 3925 * Generic 64-bit unary operation. Provide an "instr" line that 3926 * specifies an instruction that performs "result = op r0/r1". 3927 * This could be an ARM instruction or a function call. 3928 * 3929 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3930 */ 3931 /* unop vA, vB */ 3932 mov r9, rINST, lsr #8 @ r9<- A+ 3933 mov r3, rINST, lsr #12 @ r3<- B 3934 and r9, r9, #15 3935 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3936 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3937 ldmia r3, {r0-r1} @ r0/r1<- vAA 3938 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3939 @ optional op; may set condition codes 3940 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 3941 GET_INST_OPCODE(ip) @ extract opcode from rINST 3942 stmia r9, {r0-r1} @ vAA<- r0/r1 3943 GOTO_OPCODE(ip) @ jump to next instruction 3944 /* 12-13 instructions */ 3945 3946 3947 3948/* ------------------------------ */ 3949 .balign 64 3950.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 3951/* File: arm-vfp/OP_DOUBLE_TO_FLOAT.S */ 3952/* File: arm-vfp/funopNarrower.S */ 3953 /* 3954 * Generic 64bit-to-32bit unary floating point operation. Provide an 3955 * "instr" line that specifies an instruction that performs "s0 = op d0". 
3956 * 3957 * For: double-to-int, double-to-float 3958 */ 3959 /* unop vA, vB */ 3960 mov r3, rINST, lsr #12 @ r3<- B 3961 mov r9, rINST, lsr #8 @ r9<- A+ 3962 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3963 fldd d0, [r3] @ d0<- vB 3964 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3965 and r9, r9, #15 @ r9<- A 3966 fcvtsd s0, d0 @ s0<- op 3967 GET_INST_OPCODE(ip) @ extract opcode from rINST 3968 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3969 fsts s0, [r9] @ vA<- s0 3970 GOTO_OPCODE(ip) @ jump to next instruction 3971 3972 3973/* ------------------------------ */ 3974 .balign 64 3975.L_OP_INT_TO_BYTE: /* 0x8d */ 3976/* File: armv5te/OP_INT_TO_BYTE.S */ 3977/* File: armv5te/unop.S */ 3978 /* 3979 * Generic 32-bit unary operation. Provide an "instr" line that 3980 * specifies an instruction that performs "result = op r0". 3981 * This could be an ARM instruction or a function call. 3982 * 3983 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3984 * int-to-byte, int-to-char, int-to-short 3985 */ 3986 /* unop vA, vB */ 3987 mov r3, rINST, lsr #12 @ r3<- B 3988 mov r9, rINST, lsr #8 @ r9<- A+ 3989 GET_VREG(r0, r3) @ r0<- vB 3990 and r9, r9, #15 3991 mov r0, r0, asl #24 @ optional op; may set condition codes 3992 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3993 mov r0, r0, asr #24 @ r0<- op, r0-r3 changed 3994 GET_INST_OPCODE(ip) @ extract opcode from rINST 3995 SET_VREG(r0, r9) @ vAA<- r0 3996 GOTO_OPCODE(ip) @ jump to next instruction 3997 /* 9-10 instructions */ 3998 3999 4000/* ------------------------------ */ 4001 .balign 64 4002.L_OP_INT_TO_CHAR: /* 0x8e */ 4003/* File: armv5te/OP_INT_TO_CHAR.S */ 4004/* File: armv5te/unop.S */ 4005 /* 4006 * Generic 32-bit unary operation. Provide an "instr" line that 4007 * specifies an instruction that performs "result = op r0". 4008 * This could be an ARM instruction or a function call. 
4009 * 4010 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4011 * int-to-byte, int-to-char, int-to-short 4012 */ 4013 /* unop vA, vB */ 4014 mov r3, rINST, lsr #12 @ r3<- B 4015 mov r9, rINST, lsr #8 @ r9<- A+ 4016 GET_VREG(r0, r3) @ r0<- vB 4017 and r9, r9, #15 4018 mov r0, r0, asl #16 @ optional op; may set condition codes 4019 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4020 mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed 4021 GET_INST_OPCODE(ip) @ extract opcode from rINST 4022 SET_VREG(r0, r9) @ vAA<- r0 4023 GOTO_OPCODE(ip) @ jump to next instruction 4024 /* 9-10 instructions */ 4025 4026 4027/* ------------------------------ */ 4028 .balign 64 4029.L_OP_INT_TO_SHORT: /* 0x8f */ 4030/* File: armv5te/OP_INT_TO_SHORT.S */ 4031/* File: armv5te/unop.S */ 4032 /* 4033 * Generic 32-bit unary operation. Provide an "instr" line that 4034 * specifies an instruction that performs "result = op r0". 4035 * This could be an ARM instruction or a function call. 4036 * 4037 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4038 * int-to-byte, int-to-char, int-to-short 4039 */ 4040 /* unop vA, vB */ 4041 mov r3, rINST, lsr #12 @ r3<- B 4042 mov r9, rINST, lsr #8 @ r9<- A+ 4043 GET_VREG(r0, r3) @ r0<- vB 4044 and r9, r9, #15 4045 mov r0, r0, asl #16 @ optional op; may set condition codes 4046 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4047 mov r0, r0, asr #16 @ r0<- op, r0-r3 changed 4048 GET_INST_OPCODE(ip) @ extract opcode from rINST 4049 SET_VREG(r0, r9) @ vAA<- r0 4050 GOTO_OPCODE(ip) @ jump to next instruction 4051 /* 9-10 instructions */ 4052 4053 4054/* ------------------------------ */ 4055 .balign 64 4056.L_OP_ADD_INT: /* 0x90 */ 4057/* File: armv5te/OP_ADD_INT.S */ 4058/* File: armv5te/binop.S */ 4059 /* 4060 * Generic 32-bit binary operation. Provide an "instr" line that 4061 * specifies an instruction that performs "result = r0 op r1". 4062 * This could be an ARM instruction or a function call. 
(If the result 4063 * comes back in a register other than r0, you can override "result".) 4064 * 4065 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4066 * vCC (r1). Useful for integer division and modulus. Note that we 4067 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4068 * handles it correctly. 4069 * 4070 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4071 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4072 * mul-float, div-float, rem-float 4073 */ 4074 /* binop vAA, vBB, vCC */ 4075 FETCH(r0, 1) @ r0<- CCBB 4076 mov r9, rINST, lsr #8 @ r9<- AA 4077 mov r3, r0, lsr #8 @ r3<- CC 4078 and r2, r0, #255 @ r2<- BB 4079 GET_VREG(r1, r3) @ r1<- vCC 4080 GET_VREG(r0, r2) @ r0<- vBB 4081 .if 0 4082 cmp r1, #0 @ is second operand zero? 4083 beq common_errDivideByZero 4084 .endif 4085 4086 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4087 @ optional op; may set condition codes 4088 add r0, r0, r1 @ r0<- op, r0-r3 changed 4089 GET_INST_OPCODE(ip) @ extract opcode from rINST 4090 SET_VREG(r0, r9) @ vAA<- r0 4091 GOTO_OPCODE(ip) @ jump to next instruction 4092 /* 11-14 instructions */ 4093 4094 4095/* ------------------------------ */ 4096 .balign 64 4097.L_OP_SUB_INT: /* 0x91 */ 4098/* File: armv5te/OP_SUB_INT.S */ 4099/* File: armv5te/binop.S */ 4100 /* 4101 * Generic 32-bit binary operation. Provide an "instr" line that 4102 * specifies an instruction that performs "result = r0 op r1". 4103 * This could be an ARM instruction or a function call. (If the result 4104 * comes back in a register other than r0, you can override "result".) 4105 * 4106 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4107 * vCC (r1). Useful for integer division and modulus. Note that we 4108 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4109 * handles it correctly. 
4110 * 4111 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4112 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4113 * mul-float, div-float, rem-float 4114 */ 4115 /* binop vAA, vBB, vCC */ 4116 FETCH(r0, 1) @ r0<- CCBB 4117 mov r9, rINST, lsr #8 @ r9<- AA 4118 mov r3, r0, lsr #8 @ r3<- CC 4119 and r2, r0, #255 @ r2<- BB 4120 GET_VREG(r1, r3) @ r1<- vCC 4121 GET_VREG(r0, r2) @ r0<- vBB 4122 .if 0 4123 cmp r1, #0 @ is second operand zero? 4124 beq common_errDivideByZero 4125 .endif 4126 4127 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4128 @ optional op; may set condition codes 4129 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4130 GET_INST_OPCODE(ip) @ extract opcode from rINST 4131 SET_VREG(r0, r9) @ vAA<- r0 4132 GOTO_OPCODE(ip) @ jump to next instruction 4133 /* 11-14 instructions */ 4134 4135 4136/* ------------------------------ */ 4137 .balign 64 4138.L_OP_MUL_INT: /* 0x92 */ 4139/* File: armv5te/OP_MUL_INT.S */ 4140/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4141/* File: armv5te/binop.S */ 4142 /* 4143 * Generic 32-bit binary operation. Provide an "instr" line that 4144 * specifies an instruction that performs "result = r0 op r1". 4145 * This could be an ARM instruction or a function call. (If the result 4146 * comes back in a register other than r0, you can override "result".) 4147 * 4148 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4149 * vCC (r1). Useful for integer division and modulus. Note that we 4150 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4151 * handles it correctly. 
4152 * 4153 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4154 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4155 * mul-float, div-float, rem-float 4156 */ 4157 /* binop vAA, vBB, vCC */ 4158 FETCH(r0, 1) @ r0<- CCBB 4159 mov r9, rINST, lsr #8 @ r9<- AA 4160 mov r3, r0, lsr #8 @ r3<- CC 4161 and r2, r0, #255 @ r2<- BB 4162 GET_VREG(r1, r3) @ r1<- vCC 4163 GET_VREG(r0, r2) @ r0<- vBB 4164 .if 0 4165 cmp r1, #0 @ is second operand zero? 4166 beq common_errDivideByZero 4167 .endif 4168 4169 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4170 @ optional op; may set condition codes 4171 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4172 GET_INST_OPCODE(ip) @ extract opcode from rINST 4173 SET_VREG(r0, r9) @ vAA<- r0 4174 GOTO_OPCODE(ip) @ jump to next instruction 4175 /* 11-14 instructions */ 4176 4177 4178/* ------------------------------ */ 4179 .balign 64 4180.L_OP_DIV_INT: /* 0x93 */ 4181/* File: armv5te/OP_DIV_INT.S */ 4182/* File: armv5te/binop.S */ 4183 /* 4184 * Generic 32-bit binary operation. Provide an "instr" line that 4185 * specifies an instruction that performs "result = r0 op r1". 4186 * This could be an ARM instruction or a function call. (If the result 4187 * comes back in a register other than r0, you can override "result".) 4188 * 4189 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4190 * vCC (r1). Useful for integer division and modulus. Note that we 4191 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4192 * handles it correctly. 
4193 * 4194 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4195 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4196 * mul-float, div-float, rem-float 4197 */ 4198 /* binop vAA, vBB, vCC */ 4199 FETCH(r0, 1) @ r0<- CCBB 4200 mov r9, rINST, lsr #8 @ r9<- AA 4201 mov r3, r0, lsr #8 @ r3<- CC 4202 and r2, r0, #255 @ r2<- BB 4203 GET_VREG(r1, r3) @ r1<- vCC 4204 GET_VREG(r0, r2) @ r0<- vBB 4205 .if 1 4206 cmp r1, #0 @ is second operand zero? 4207 beq common_errDivideByZero 4208 .endif 4209 4210 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4211 @ optional op; may set condition codes 4212 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4213 GET_INST_OPCODE(ip) @ extract opcode from rINST 4214 SET_VREG(r0, r9) @ vAA<- r0 4215 GOTO_OPCODE(ip) @ jump to next instruction 4216 /* 11-14 instructions */ 4217 4218 4219/* ------------------------------ */ 4220 .balign 64 4221.L_OP_REM_INT: /* 0x94 */ 4222/* File: armv5te/OP_REM_INT.S */ 4223/* idivmod returns quotient in r0 and remainder in r1 */ 4224/* File: armv5te/binop.S */ 4225 /* 4226 * Generic 32-bit binary operation. Provide an "instr" line that 4227 * specifies an instruction that performs "result = r0 op r1". 4228 * This could be an ARM instruction or a function call. (If the result 4229 * comes back in a register other than r0, you can override "result".) 4230 * 4231 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4232 * vCC (r1). Useful for integer division and modulus. Note that we 4233 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4234 * handles it correctly. 
4235 * 4236 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4237 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4238 * mul-float, div-float, rem-float 4239 */ 4240 /* binop vAA, vBB, vCC */ 4241 FETCH(r0, 1) @ r0<- CCBB 4242 mov r9, rINST, lsr #8 @ r9<- AA 4243 mov r3, r0, lsr #8 @ r3<- CC 4244 and r2, r0, #255 @ r2<- BB 4245 GET_VREG(r1, r3) @ r1<- vCC 4246 GET_VREG(r0, r2) @ r0<- vBB 4247 .if 1 4248 cmp r1, #0 @ is second operand zero? 4249 beq common_errDivideByZero 4250 .endif 4251 4252 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4253 @ optional op; may set condition codes 4254 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4255 GET_INST_OPCODE(ip) @ extract opcode from rINST 4256 SET_VREG(r1, r9) @ vAA<- r1 4257 GOTO_OPCODE(ip) @ jump to next instruction 4258 /* 11-14 instructions */ 4259 4260 4261/* ------------------------------ */ 4262 .balign 64 4263.L_OP_AND_INT: /* 0x95 */ 4264/* File: armv5te/OP_AND_INT.S */ 4265/* File: armv5te/binop.S */ 4266 /* 4267 * Generic 32-bit binary operation. Provide an "instr" line that 4268 * specifies an instruction that performs "result = r0 op r1". 4269 * This could be an ARM instruction or a function call. (If the result 4270 * comes back in a register other than r0, you can override "result".) 4271 * 4272 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4273 * vCC (r1). Useful for integer division and modulus. Note that we 4274 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4275 * handles it correctly. 
4276 * 4277 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4278 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4279 * mul-float, div-float, rem-float 4280 */ 4281 /* binop vAA, vBB, vCC */ 4282 FETCH(r0, 1) @ r0<- CCBB 4283 mov r9, rINST, lsr #8 @ r9<- AA 4284 mov r3, r0, lsr #8 @ r3<- CC 4285 and r2, r0, #255 @ r2<- BB 4286 GET_VREG(r1, r3) @ r1<- vCC 4287 GET_VREG(r0, r2) @ r0<- vBB 4288 .if 0 4289 cmp r1, #0 @ is second operand zero? 4290 beq common_errDivideByZero 4291 .endif 4292 4293 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4294 @ optional op; may set condition codes 4295 and r0, r0, r1 @ r0<- op, r0-r3 changed 4296 GET_INST_OPCODE(ip) @ extract opcode from rINST 4297 SET_VREG(r0, r9) @ vAA<- r0 4298 GOTO_OPCODE(ip) @ jump to next instruction 4299 /* 11-14 instructions */ 4300 4301 4302/* ------------------------------ */ 4303 .balign 64 4304.L_OP_OR_INT: /* 0x96 */ 4305/* File: armv5te/OP_OR_INT.S */ 4306/* File: armv5te/binop.S */ 4307 /* 4308 * Generic 32-bit binary operation. Provide an "instr" line that 4309 * specifies an instruction that performs "result = r0 op r1". 4310 * This could be an ARM instruction or a function call. (If the result 4311 * comes back in a register other than r0, you can override "result".) 4312 * 4313 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4314 * vCC (r1). Useful for integer division and modulus. Note that we 4315 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4316 * handles it correctly. 
4317 * 4318 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4319 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4320 * mul-float, div-float, rem-float 4321 */ 4322 /* binop vAA, vBB, vCC */ 4323 FETCH(r0, 1) @ r0<- CCBB 4324 mov r9, rINST, lsr #8 @ r9<- AA 4325 mov r3, r0, lsr #8 @ r3<- CC 4326 and r2, r0, #255 @ r2<- BB 4327 GET_VREG(r1, r3) @ r1<- vCC 4328 GET_VREG(r0, r2) @ r0<- vBB 4329 .if 0 4330 cmp r1, #0 @ is second operand zero? 4331 beq common_errDivideByZero 4332 .endif 4333 4334 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4335 @ optional op; may set condition codes 4336 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4337 GET_INST_OPCODE(ip) @ extract opcode from rINST 4338 SET_VREG(r0, r9) @ vAA<- r0 4339 GOTO_OPCODE(ip) @ jump to next instruction 4340 /* 11-14 instructions */ 4341 4342 4343/* ------------------------------ */ 4344 .balign 64 4345.L_OP_XOR_INT: /* 0x97 */ 4346/* File: armv5te/OP_XOR_INT.S */ 4347/* File: armv5te/binop.S */ 4348 /* 4349 * Generic 32-bit binary operation. Provide an "instr" line that 4350 * specifies an instruction that performs "result = r0 op r1". 4351 * This could be an ARM instruction or a function call. (If the result 4352 * comes back in a register other than r0, you can override "result".) 4353 * 4354 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4355 * vCC (r1). Useful for integer division and modulus. Note that we 4356 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4357 * handles it correctly. 
4358 * 4359 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4360 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4361 * mul-float, div-float, rem-float 4362 */ 4363 /* binop vAA, vBB, vCC */ 4364 FETCH(r0, 1) @ r0<- CCBB 4365 mov r9, rINST, lsr #8 @ r9<- AA 4366 mov r3, r0, lsr #8 @ r3<- CC 4367 and r2, r0, #255 @ r2<- BB 4368 GET_VREG(r1, r3) @ r1<- vCC 4369 GET_VREG(r0, r2) @ r0<- vBB 4370 .if 0 4371 cmp r1, #0 @ is second operand zero? 4372 beq common_errDivideByZero 4373 .endif 4374 4375 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4376 @ optional op; may set condition codes 4377 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4378 GET_INST_OPCODE(ip) @ extract opcode from rINST 4379 SET_VREG(r0, r9) @ vAA<- r0 4380 GOTO_OPCODE(ip) @ jump to next instruction 4381 /* 11-14 instructions */ 4382 4383 4384/* ------------------------------ */ 4385 .balign 64 4386.L_OP_SHL_INT: /* 0x98 */ 4387/* File: armv5te/OP_SHL_INT.S */ 4388/* File: armv5te/binop.S */ 4389 /* 4390 * Generic 32-bit binary operation. Provide an "instr" line that 4391 * specifies an instruction that performs "result = r0 op r1". 4392 * This could be an ARM instruction or a function call. (If the result 4393 * comes back in a register other than r0, you can override "result".) 4394 * 4395 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4396 * vCC (r1). Useful for integer division and modulus. Note that we 4397 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4398 * handles it correctly. 
 *
 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
 *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
 *      mul-float, div-float, rem-float
 */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0 (generated): no div-by-zero check for shifts
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; may set condition codes (Dalvik masks shift count to 5 bits)
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT: /* 0x99 */
/* File: armv5te/OP_SHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0 (generated): no div-by-zero check for shifts
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op (arithmetic shift), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT: /* 0x9a */
/* File: armv5te/OP_USHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0 (generated): no div-by-zero check for shifts
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op (logical shift), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_LONG: /* 0x9b */
/* File: armv5te/OP_ADD_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0 (generated): add-long cannot divide by zero
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    adds    r0, r0, r2                  @ optional op; may set condition codes (carry into high word)
    adc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG: /* 0x9c */
/* File: armv5te/OP_SUB_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0 (generated): sub-long cannot divide by zero
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ optional op; may set condition codes (borrow into high word)
    sbc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG: /* 0x9d */
/* File: armv5te/OP_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
     */
    /* mul-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX (full 64-bit low product)
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST, lsr #8           @ r0<- AA
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX)) = high word of result
    add     r0, rFP, r0, lsl #2         @ r0<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_MUL_LONG_finish        @ store r9/r10 and dispatch (out-of-line tail)

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG: /* 0x9e */
/* File: armv5te/OP_DIV_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1                               @ chkzero==1 (generated): div-long must throw on zero divisor
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed (quotient in r0/r1)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG: /* 0x9f */
/* File: armv5te/OP_REM_LONG.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1                               @ chkzero==1 (generated): rem-long must throw on zero divisor
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed (remainder in r2/r3)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vAA/vAA+1<- r2/r3 (remainder, per comment above)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG: /* 0xa0 */
/* File: armv5te/OP_AND_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0 (generated): and-long cannot divide by zero
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ optional op; may set condition codes
    and     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG: /* 0xa1 */
/* File: armv5te/OP_OR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0 (generated): or-long cannot divide by zero
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ optional op; may set condition codes
    orr     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG: /* 0xa2 */
/* File: armv5te/OP_XOR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0 (generated): xor-long cannot divide by zero
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG: /* 0xa3 */
/* File: armv5te/OP_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))  [bits carried up from low word]
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish        @ low word + store handled out of line

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG: /* 0xa4 */
/* File: armv5te/OP_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2  (low word: logical; sign comes from high word)
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)  (arithmetic)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHR_LONG_finish        @ high word + store handled out of line

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG: /* 0xa5 */
/* File: armv5te/OP_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* ushr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)  (logical)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_USHR_LONG_finish       @ high word + store handled out of line

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT: /* 0xa6 */
/* File: arm-vfp/OP_ADD_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fadds   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT: /* 0xa7 */
/* File: arm-vfp/OP_SUB_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.
     * Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT: /* 0xa8 */
/* File: arm-vfp/OP_MUL_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuls   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT: /* 0xa9 */
/* File: arm-vfp/OP_DIV_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT: /* 0xaa */
/* File: armv5te/OP_REM_FLOAT.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.
     * Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0 (generated): float remainder of 0 is NaN, not a throw
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed (softfp: float args/result in core regs)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE: /* 0xab */
/* File: arm-vfp/OP_ADD_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    faddd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE: /* 0xac */
/* File: arm-vfp/OP_SUB_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE: /* 0xad */
/* File: arm-vfp/OP_MUL_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuld   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE: /* 0xae */
/* File: arm-vfp/OP_DIV_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE: /* 0xaf */
/* File: armv5te/OP_REM_DOUBLE.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0 (generated): double remainder of 0 is NaN, not a throw
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed (softfp: double args/result in core regs)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_2ADDR: /* 0xb0 */
/* File: armv5te/OP_ADD_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip the high nibble, which is B)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0 (generated): add cannot divide by zero
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT_2ADDR: /* 0xb1 */
/* File: armv5te/OP_SUB_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip the high nibble, which is B)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0 (generated): sub cannot divide by zero
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_2ADDR: /* 0xb2 */
/* File: armv5te/OP_MUL_INT_2ADDR.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip the high nibble, which is B)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0 (generated): mul cannot divide by zero
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed (Rd != Rm constraint, see above)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_2ADDR: /* 0xb3 */
/* File: armv5te/OP_DIV_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip the high nibble, which is B)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1                               @ chkzero==1 (generated): div must throw on zero divisor
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_2ADDR: /* 0xb4 */
/* File: armv5te/OP_REM_INT_2ADDR.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip the high nibble, which is B)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1                               @ chkzero==1 (generated): rem must throw on zero divisor
    cmp     r1, #0                      @ is second operand zero?
5454 beq common_errDivideByZero 5455 .endif 5456 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5457 5458 @ optional op; may set condition codes 5459 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 5460 GET_INST_OPCODE(ip) @ extract opcode from rINST 5461 SET_VREG(r1, r9) @ vAA<- r1 5462 GOTO_OPCODE(ip) @ jump to next instruction 5463 /* 10-13 instructions */ 5464 5465 5466/* ------------------------------ */ 5467 .balign 64 5468.L_OP_AND_INT_2ADDR: /* 0xb5 */ 5469/* File: armv5te/OP_AND_INT_2ADDR.S */ 5470/* File: armv5te/binop2addr.S */ 5471 /* 5472 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5473 * that specifies an instruction that performs "result = r0 op r1". 5474 * This could be an ARM instruction or a function call. (If the result 5475 * comes back in a register other than r0, you can override "result".) 5476 * 5477 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5478 * vCC (r1). Useful for integer division and modulus. 5479 * 5480 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5481 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5482 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5483 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5484 */ 5485 /* binop/2addr vA, vB */ 5486 mov r9, rINST, lsr #8 @ r9<- A+ 5487 mov r3, rINST, lsr #12 @ r3<- B 5488 and r9, r9, #15 5489 GET_VREG(r1, r3) @ r1<- vB 5490 GET_VREG(r0, r9) @ r0<- vA 5491 .if 0 5492 cmp r1, #0 @ is second operand zero? 
5493 beq common_errDivideByZero 5494 .endif 5495 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5496 5497 @ optional op; may set condition codes 5498 and r0, r0, r1 @ r0<- op, r0-r3 changed 5499 GET_INST_OPCODE(ip) @ extract opcode from rINST 5500 SET_VREG(r0, r9) @ vAA<- r0 5501 GOTO_OPCODE(ip) @ jump to next instruction 5502 /* 10-13 instructions */ 5503 5504 5505/* ------------------------------ */ 5506 .balign 64 5507.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5508/* File: armv5te/OP_OR_INT_2ADDR.S */ 5509/* File: armv5te/binop2addr.S */ 5510 /* 5511 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5512 * that specifies an instruction that performs "result = r0 op r1". 5513 * This could be an ARM instruction or a function call. (If the result 5514 * comes back in a register other than r0, you can override "result".) 5515 * 5516 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5517 * vCC (r1). Useful for integer division and modulus. 5518 * 5519 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5520 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5521 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5522 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5523 */ 5524 /* binop/2addr vA, vB */ 5525 mov r9, rINST, lsr #8 @ r9<- A+ 5526 mov r3, rINST, lsr #12 @ r3<- B 5527 and r9, r9, #15 5528 GET_VREG(r1, r3) @ r1<- vB 5529 GET_VREG(r0, r9) @ r0<- vA 5530 .if 0 5531 cmp r1, #0 @ is second operand zero? 
5532 beq common_errDivideByZero 5533 .endif 5534 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5535 5536 @ optional op; may set condition codes 5537 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5538 GET_INST_OPCODE(ip) @ extract opcode from rINST 5539 SET_VREG(r0, r9) @ vAA<- r0 5540 GOTO_OPCODE(ip) @ jump to next instruction 5541 /* 10-13 instructions */ 5542 5543 5544/* ------------------------------ */ 5545 .balign 64 5546.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5547/* File: armv5te/OP_XOR_INT_2ADDR.S */ 5548/* File: armv5te/binop2addr.S */ 5549 /* 5550 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5551 * that specifies an instruction that performs "result = r0 op r1". 5552 * This could be an ARM instruction or a function call. (If the result 5553 * comes back in a register other than r0, you can override "result".) 5554 * 5555 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5556 * vCC (r1). Useful for integer division and modulus. 5557 * 5558 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5559 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5560 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5561 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5562 */ 5563 /* binop/2addr vA, vB */ 5564 mov r9, rINST, lsr #8 @ r9<- A+ 5565 mov r3, rINST, lsr #12 @ r3<- B 5566 and r9, r9, #15 5567 GET_VREG(r1, r3) @ r1<- vB 5568 GET_VREG(r0, r9) @ r0<- vA 5569 .if 0 5570 cmp r1, #0 @ is second operand zero? 
5571 beq common_errDivideByZero 5572 .endif 5573 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5574 5575 @ optional op; may set condition codes 5576 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5577 GET_INST_OPCODE(ip) @ extract opcode from rINST 5578 SET_VREG(r0, r9) @ vAA<- r0 5579 GOTO_OPCODE(ip) @ jump to next instruction 5580 /* 10-13 instructions */ 5581 5582 5583/* ------------------------------ */ 5584 .balign 64 5585.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5586/* File: armv5te/OP_SHL_INT_2ADDR.S */ 5587/* File: armv5te/binop2addr.S */ 5588 /* 5589 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5590 * that specifies an instruction that performs "result = r0 op r1". 5591 * This could be an ARM instruction or a function call. (If the result 5592 * comes back in a register other than r0, you can override "result".) 5593 * 5594 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5595 * vCC (r1). Useful for integer division and modulus. 5596 * 5597 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5598 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5599 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5600 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5601 */ 5602 /* binop/2addr vA, vB */ 5603 mov r9, rINST, lsr #8 @ r9<- A+ 5604 mov r3, rINST, lsr #12 @ r3<- B 5605 and r9, r9, #15 5606 GET_VREG(r1, r3) @ r1<- vB 5607 GET_VREG(r0, r9) @ r0<- vA 5608 .if 0 5609 cmp r1, #0 @ is second operand zero? 
5610 beq common_errDivideByZero 5611 .endif 5612 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5613 5614 and r1, r1, #31 @ optional op; may set condition codes 5615 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5616 GET_INST_OPCODE(ip) @ extract opcode from rINST 5617 SET_VREG(r0, r9) @ vAA<- r0 5618 GOTO_OPCODE(ip) @ jump to next instruction 5619 /* 10-13 instructions */ 5620 5621 5622/* ------------------------------ */ 5623 .balign 64 5624.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5625/* File: armv5te/OP_SHR_INT_2ADDR.S */ 5626/* File: armv5te/binop2addr.S */ 5627 /* 5628 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5629 * that specifies an instruction that performs "result = r0 op r1". 5630 * This could be an ARM instruction or a function call. (If the result 5631 * comes back in a register other than r0, you can override "result".) 5632 * 5633 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5634 * vCC (r1). Useful for integer division and modulus. 5635 * 5636 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5637 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5638 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5639 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5640 */ 5641 /* binop/2addr vA, vB */ 5642 mov r9, rINST, lsr #8 @ r9<- A+ 5643 mov r3, rINST, lsr #12 @ r3<- B 5644 and r9, r9, #15 5645 GET_VREG(r1, r3) @ r1<- vB 5646 GET_VREG(r0, r9) @ r0<- vA 5647 .if 0 5648 cmp r1, #0 @ is second operand zero? 
5649 beq common_errDivideByZero 5650 .endif 5651 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5652 5653 and r1, r1, #31 @ optional op; may set condition codes 5654 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5655 GET_INST_OPCODE(ip) @ extract opcode from rINST 5656 SET_VREG(r0, r9) @ vAA<- r0 5657 GOTO_OPCODE(ip) @ jump to next instruction 5658 /* 10-13 instructions */ 5659 5660 5661/* ------------------------------ */ 5662 .balign 64 5663.L_OP_USHR_INT_2ADDR: /* 0xba */ 5664/* File: armv5te/OP_USHR_INT_2ADDR.S */ 5665/* File: armv5te/binop2addr.S */ 5666 /* 5667 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5668 * that specifies an instruction that performs "result = r0 op r1". 5669 * This could be an ARM instruction or a function call. (If the result 5670 * comes back in a register other than r0, you can override "result".) 5671 * 5672 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5673 * vCC (r1). Useful for integer division and modulus. 5674 * 5675 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5676 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5677 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5678 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5679 */ 5680 /* binop/2addr vA, vB */ 5681 mov r9, rINST, lsr #8 @ r9<- A+ 5682 mov r3, rINST, lsr #12 @ r3<- B 5683 and r9, r9, #15 5684 GET_VREG(r1, r3) @ r1<- vB 5685 GET_VREG(r0, r9) @ r0<- vA 5686 .if 0 5687 cmp r1, #0 @ is second operand zero? 
5688 beq common_errDivideByZero 5689 .endif 5690 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5691 5692 and r1, r1, #31 @ optional op; may set condition codes 5693 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5694 GET_INST_OPCODE(ip) @ extract opcode from rINST 5695 SET_VREG(r0, r9) @ vAA<- r0 5696 GOTO_OPCODE(ip) @ jump to next instruction 5697 /* 10-13 instructions */ 5698 5699 5700/* ------------------------------ */ 5701 .balign 64 5702.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 5703/* File: armv5te/OP_ADD_LONG_2ADDR.S */ 5704/* File: armv5te/binopWide2addr.S */ 5705 /* 5706 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5707 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5708 * This could be an ARM instruction or a function call. (If the result 5709 * comes back in a register other than r0, you can override "result".) 5710 * 5711 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5712 * vCC (r1). Useful for integer division and modulus. 5713 * 5714 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5715 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5716 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5717 * rem-double/2addr 5718 */ 5719 /* binop/2addr vA, vB */ 5720 mov r9, rINST, lsr #8 @ r9<- A+ 5721 mov r1, rINST, lsr #12 @ r1<- B 5722 and r9, r9, #15 5723 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5724 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5725 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5726 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5727 .if 0 5728 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5729 beq common_errDivideByZero 5730 .endif 5731 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5732 5733 adds r0, r0, r2 @ optional op; may set condition codes 5734 adc r1, r1, r3 @ result<- op, r0-r3 changed 5735 GET_INST_OPCODE(ip) @ extract opcode from rINST 5736 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5737 GOTO_OPCODE(ip) @ jump to next instruction 5738 /* 12-15 instructions */ 5739 5740 5741/* ------------------------------ */ 5742 .balign 64 5743.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 5744/* File: armv5te/OP_SUB_LONG_2ADDR.S */ 5745/* File: armv5te/binopWide2addr.S */ 5746 /* 5747 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5748 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5749 * This could be an ARM instruction or a function call. (If the result 5750 * comes back in a register other than r0, you can override "result".) 5751 * 5752 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5753 * vCC (r1). Useful for integer division and modulus. 5754 * 5755 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5756 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5757 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5758 * rem-double/2addr 5759 */ 5760 /* binop/2addr vA, vB */ 5761 mov r9, rINST, lsr #8 @ r9<- A+ 5762 mov r1, rINST, lsr #12 @ r1<- B 5763 and r9, r9, #15 5764 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5765 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5766 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5767 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5768 .if 0 5769 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ optional op; may set condition codes
    sbc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG_2ADDR: /* 0xbd */
/* File: armv5te/OP_MUL_LONG_2ADDR.S */
    /*
     * Signed 64-bit integer multiply, "/2addr" version.
     *
     * See OP_MUL_LONG for an explanation.
     *
     * We get a little tight on registers, so to avoid looking up &fp[A]
     * again we stuff it into rINST.
     */
    /* mul-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     rINST, rFP, r9, lsl #2      @ rINST<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   rINST, {r0-r1}              @ r0/r1<- vAA/vAA+1
    @ 64x64->64 multiply from 32-bit parts: with vA = r1:r0 and vB = r3:r2,
    @ result.lo = lo32(r2*r0); result.hi = hi32(r2*r0) + r2*r1 + r3*r0.
    @ (High cross-term products overflow bit 63 and are correctly dropped.)
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST                   @ r0<- &fp[A] (free up rINST)
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG_2ADDR: /* 0xbe */
/* File: armv5te/OP_DIV_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
5822 * 5823 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5824 * vCC (r1). Useful for integer division and modulus. 5825 * 5826 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5827 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5828 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5829 * rem-double/2addr 5830 */ 5831 /* binop/2addr vA, vB */ 5832 mov r9, rINST, lsr #8 @ r9<- A+ 5833 mov r1, rINST, lsr #12 @ r1<- B 5834 and r9, r9, #15 5835 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5836 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5837 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5838 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5839 .if 1 5840 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5841 beq common_errDivideByZero 5842 .endif 5843 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5844 5845 @ optional op; may set condition codes 5846 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5847 GET_INST_OPCODE(ip) @ extract opcode from rINST 5848 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5849 GOTO_OPCODE(ip) @ jump to next instruction 5850 /* 12-15 instructions */ 5851 5852 5853/* ------------------------------ */ 5854 .balign 64 5855.L_OP_REM_LONG_2ADDR: /* 0xbf */ 5856/* File: armv5te/OP_REM_LONG_2ADDR.S */ 5857/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 5858/* File: armv5te/binopWide2addr.S */ 5859 /* 5860 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5861 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5862 * This could be an ARM instruction or a function call. (If the result 5863 * comes back in a register other than r0, you can override "result".) 5864 * 5865 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5866 * vCC (r1). Useful for integer division and modulus. 
5867 * 5868 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5869 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5870 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5871 * rem-double/2addr 5872 */ 5873 /* binop/2addr vA, vB */ 5874 mov r9, rINST, lsr #8 @ r9<- A+ 5875 mov r1, rINST, lsr #12 @ r1<- B 5876 and r9, r9, #15 5877 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5878 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5879 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5880 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5881 .if 1 5882 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5883 beq common_errDivideByZero 5884 .endif 5885 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5886 5887 @ optional op; may set condition codes 5888 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5889 GET_INST_OPCODE(ip) @ extract opcode from rINST 5890 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 5891 GOTO_OPCODE(ip) @ jump to next instruction 5892 /* 12-15 instructions */ 5893 5894 5895/* ------------------------------ */ 5896 .balign 64 5897.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 5898/* File: armv5te/OP_AND_LONG_2ADDR.S */ 5899/* File: armv5te/binopWide2addr.S */ 5900 /* 5901 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5902 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5903 * This could be an ARM instruction or a function call. (If the result 5904 * comes back in a register other than r0, you can override "result".) 5905 * 5906 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5907 * vCC (r1). Useful for integer division and modulus. 
5908 * 5909 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5910 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5911 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5912 * rem-double/2addr 5913 */ 5914 /* binop/2addr vA, vB */ 5915 mov r9, rINST, lsr #8 @ r9<- A+ 5916 mov r1, rINST, lsr #12 @ r1<- B 5917 and r9, r9, #15 5918 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5919 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5920 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5921 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5922 .if 0 5923 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5924 beq common_errDivideByZero 5925 .endif 5926 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5927 5928 and r0, r0, r2 @ optional op; may set condition codes 5929 and r1, r1, r3 @ result<- op, r0-r3 changed 5930 GET_INST_OPCODE(ip) @ extract opcode from rINST 5931 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5932 GOTO_OPCODE(ip) @ jump to next instruction 5933 /* 12-15 instructions */ 5934 5935 5936/* ------------------------------ */ 5937 .balign 64 5938.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 5939/* File: armv5te/OP_OR_LONG_2ADDR.S */ 5940/* File: armv5te/binopWide2addr.S */ 5941 /* 5942 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5943 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5944 * This could be an ARM instruction or a function call. (If the result 5945 * comes back in a register other than r0, you can override "result".) 5946 * 5947 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5948 * vCC (r1). Useful for integer division and modulus. 
5949 * 5950 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5951 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5952 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5953 * rem-double/2addr 5954 */ 5955 /* binop/2addr vA, vB */ 5956 mov r9, rINST, lsr #8 @ r9<- A+ 5957 mov r1, rINST, lsr #12 @ r1<- B 5958 and r9, r9, #15 5959 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5960 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5961 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5962 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5963 .if 0 5964 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5965 beq common_errDivideByZero 5966 .endif 5967 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5968 5969 orr r0, r0, r2 @ optional op; may set condition codes 5970 orr r1, r1, r3 @ result<- op, r0-r3 changed 5971 GET_INST_OPCODE(ip) @ extract opcode from rINST 5972 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5973 GOTO_OPCODE(ip) @ jump to next instruction 5974 /* 12-15 instructions */ 5975 5976 5977/* ------------------------------ */ 5978 .balign 64 5979.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 5980/* File: armv5te/OP_XOR_LONG_2ADDR.S */ 5981/* File: armv5te/binopWide2addr.S */ 5982 /* 5983 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5984 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5985 * This could be an ARM instruction or a function call. (If the result 5986 * comes back in a register other than r0, you can override "result".) 5987 * 5988 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5989 * vCC (r1). Useful for integer division and modulus. 
5990 * 5991 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5992 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5993 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5994 * rem-double/2addr 5995 */ 5996 /* binop/2addr vA, vB */ 5997 mov r9, rINST, lsr #8 @ r9<- A+ 5998 mov r1, rINST, lsr #12 @ r1<- B 5999 and r9, r9, #15 6000 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6001 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6002 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6003 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6004 .if 0 6005 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6006 beq common_errDivideByZero 6007 .endif 6008 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6009 6010 eor r0, r0, r2 @ optional op; may set condition codes 6011 eor r1, r1, r3 @ result<- op, r0-r3 changed 6012 GET_INST_OPCODE(ip) @ extract opcode from rINST 6013 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6014 GOTO_OPCODE(ip) @ jump to next instruction 6015 /* 12-15 instructions */ 6016 6017 6018/* ------------------------------ */ 6019 .balign 64 6020.L_OP_SHL_LONG_2ADDR: /* 0xc3 */ 6021/* File: armv5te/OP_SHL_LONG_2ADDR.S */ 6022 /* 6023 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6024 * 32-bit shift distance. 
     */
    /* shl-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    b       .LOP_SHL_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
/* File: armv5te/OP_SHR_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
     * 32-bit shift distance.
     */
    /* shr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    mov     r1, r1, asr r2              @ r1<- r1 >> r2 (arithmetic: sign-fill)
    b       .LOP_SHR_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
/* File: armv5te/OP_USHR_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
     * 32-bit shift distance.
     */
    /* ushr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2 (logical: zero-fill)
    b       .LOP_USHR_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
/* File: arm-vfp/OP_ADD_FLOAT_2ADDR.S */
/* File: arm-vfp/fbinop2addr.S */
    /*
     * Generic 32-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "s2 = s0 op s1".
     *
     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fadds   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
/* File: arm-vfp/OP_SUB_FLOAT_2ADDR.S */
/* File: arm-vfp/fbinop2addr.S */
    /*
     * Generic 32-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "s2 = s0 op s1".
 *
 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
 */
    /* binop/2addr vA, vB */
    @ NOTE(review): this is the tail of the preceding arm-vfp/fbinop2addr.S
    @ expansion (sub-float/2addr); its .L_OP_SUB_FLOAT_2ADDR label is above
    @ this excerpt.  The VFP load of vA is interleaved with the fetch of the
    @ next instruction to hide load latency; do not reorder.
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (upper bits not yet masked)
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fsubs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
/* File: arm-vfp/OP_MUL_FLOAT_2ADDR.S */
/* File: arm-vfp/fbinop2addr.S */
    /*
     * Generic 32-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "s2 = s0 op s1".
     *
     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fmuls   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
/* File: arm-vfp/OP_DIV_FLOAT_2ADDR.S */
/* File: arm-vfp/fbinop2addr.S */
    /*
     * Generic 32-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "s2 = s0 op s1".
     *
     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
     */
    /* binop/2addr vA, vB */
    @ No divide-by-zero check: float division by zero yields IEEE 754
    @ infinity/NaN in hardware rather than throwing.
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fdivs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT_2ADDR: /* 0xca */
/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    @ NOTE: the ".if 0" below is the template's "chkzero" flag, baked in as 0
    @ by gen-mterp.py -- the zero check is assembled out for this opcode.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
/* File: arm-vfp/OP_ADD_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    faddd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
/* File: arm-vfp/OP_SUB_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    @ Continuation of OP_SUB_DOUBLE_2ADDR (label is in the previous section).
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fsubd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
/* File: arm-vfp/OP_MUL_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fmuld   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
/* File: arm-vfp/OP_DIV_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    @ No divide-by-zero check: double division by zero yields IEEE 754
    @ infinity/NaN in hardware rather than throwing.
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fdivd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    @ Operands are passed in register pairs per the ARM EABI: vA in r0/r1,
    @ vB in r2/r3; fmod() returns the double result in r0/r1.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT16: /* 0xd0 */
/* File: armv5te/OP_ADD_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    @ Tail of OP_ADD_INT_LIT16 (label in the previous section); the ".if 0"
    @ above is the template's "chkzero" flag assembled out for add.
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT: /* 0xd1 */
/* File: armv5te/OP_RSUB_INT.S */
/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ rsb computes r1 - r0, i.e. literal minus vB, per rsub-int semantics.
    rsb     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT16: /* 0xd2 */
/* File: armv5te/OP_MUL_INT_LIT16.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ Pre-ARMv6, "mul Rd, Rm, Rs" with Rd == Rm is unpredictable, hence the
    @ operand order noted above.
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT16: /* 0xd3 */
/* File: armv5te/OP_DIV_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 1                               @ chkzero=1: division must throw on 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ ARMv5TE has no hardware divide; use the EABI runtime helper.
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT16: /* 0xd4 */
/* File: armv5te/OP_REM_INT_LIT16.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    @ Continuation of OP_REM_INT_LIT16 (label in the previous section).
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 1                               @ chkzero=1: modulus must throw on 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ Result is taken from r1 (remainder), not r0 -- see note above.
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT16: /* 0xd5 */
/* File: armv5te/OP_AND_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT16: /* 0xd6 */
/* File: armv5te/OP_OR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT16: /* 0xd7 */
/* File: armv5te/OP_XOR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT8: /* 0xd8 */
/* File: armv5te/OP_ADD_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    @ Continuation of OP_ADD_INT_LIT8 (label in the previous section).
    @ In the lit8 template the explicit "cmp r1, #0" is commented out
    @ because "movs ... asr #8" already sets Z from the extracted literal;
    @ the beq (when chkzero=1) consumes those flags directly.
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT_LIT8: /* 0xd9 */
/* File: armv5te/OP_RSUB_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    @ rsb computes r1 - r0, i.e. literal minus vBB, per rsub-int semantics.
    rsb     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT8: /* 0xda */
/* File: armv5te/OP_MUL_INT_LIT8.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT8: /* 0xdb */
/* File: armv5te/OP_DIV_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 1                               @ chkzero=1; Z already set by movs
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    @ Tail of OP_DIV_INT_LIT8 (label in the previous section).  The beq
    @ above consumes the flags set by "movs ... asr #8" (the explicit cmp
    @ is commented out in the lit8 template for that reason).
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    @ ARMv5TE has no hardware divide; use the EABI runtime helper.
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT8: /* 0xdc */
/* File: armv5te/OP_REM_INT_LIT8.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 1                               @ chkzero=1; Z already set by movs
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    @ Result is taken from r1 (remainder), not r0 -- see note above.
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT8: /* 0xdd */
/* File: armv5te/OP_AND_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT8: /* 0xde */
/* File: armv5te/OP_OR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    @ Tail of OP_OR_INT_LIT8 (label in the previous section; the ".if 0"
    @ above assembles out the zero check, which or-int does not need).
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT8: /* 0xdf */
/* File: armv5te/OP_XOR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT_LIT8: /* 0xe0 */
/* File: armv5te/OP_SHL_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ Shift distance is masked to 0-31, matching Dalvik shl-int semantics.
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_LIT8: /* 0xe1 */
/* File: armv5te/OP_SHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    @ Tail of OP_SHR_INT_LIT8 (label in the previous section).
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ Shift distance masked to 0-31, matching Dalvik shr-int semantics;
    @ "asr" gives the arithmetic (sign-propagating) shift.
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_LIT8: /* 0xe2 */
/* File: armv5te/OP_USHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ "lsr" gives the logical (zero-filling) shift for ushr-int.
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_IGET_VOLATILE: /* 0xe3 */
/* File: armv5te/OP_IGET_VOLATILE.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    @ Fast path: use the cached InstField from pResFields.  On a cache miss,
    @ fall through to resolve; EXPORT_PC is required first because
    @ dvmResolveInstField can throw.  The .LOP_IGET_VOLATILE_finish
    @ continuation lives in the out-of-line handler section of this file.
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_VOLATILE_finish   @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_VOLATILE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_VOLATILE: /* 0xe4 */
/* File: armv5te/OP_IPUT_VOLATILE.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    @ Same resolve-or-fast-path structure as OP_IGET_VOLATILE above.
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_VOLATILE_finish   @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_VOLATILE_finish   @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET_VOLATILE: /* 0xe5 */
/* File: armv5te/OP_SGET_VOLATILE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    @ NOTE(review): this handler continues beyond the end of this excerpt.
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
7189 beq .LOP_SGET_VOLATILE_resolve @ yes, do resolve 7190.LOP_SGET_VOLATILE_finish: @ field ptr in r0 7191 ldr r1, [r0, #offStaticField_value] @ r1<- field value 7192 SMP_DMB @ acquiring load 7193 mov r2, rINST, lsr #8 @ r2<- AA 7194 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7195 SET_VREG(r1, r2) @ fp[AA]<- r1 7196 GET_INST_OPCODE(ip) @ extract opcode from rINST 7197 GOTO_OPCODE(ip) @ jump to next instruction 7198 7199 7200/* ------------------------------ */ 7201 .balign 64 7202.L_OP_SPUT_VOLATILE: /* 0xe6 */ 7203/* File: armv5te/OP_SPUT_VOLATILE.S */ 7204/* File: armv5te/OP_SPUT.S */ 7205 /* 7206 * General 32-bit SPUT handler. 7207 * 7208 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 7209 */ 7210 /* op vAA, field@BBBB */ 7211 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7212 FETCH(r1, 1) @ r1<- field ref BBBB 7213 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7214 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7215 cmp r0, #0 @ is resolved entry null? 7216 beq .LOP_SPUT_VOLATILE_resolve @ yes, do resolve 7217.LOP_SPUT_VOLATILE_finish: @ field ptr in r0 7218 mov r2, rINST, lsr #8 @ r2<- AA 7219 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7220 GET_VREG(r1, r2) @ r1<- fp[AA] 7221 GET_INST_OPCODE(ip) @ extract opcode from rINST 7222 SMP_DMB @ releasing store 7223 str r1, [r0, #offStaticField_value] @ field<- vAA 7224 GOTO_OPCODE(ip) @ jump to next instruction 7225 7226 7227/* ------------------------------ */ 7228 .balign 64 7229.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */ 7230/* File: armv5te/OP_IGET_OBJECT_VOLATILE.S */ 7231/* File: armv5te/OP_IGET.S */ 7232 /* 7233 * General 32-bit instance field get. 
7234 * 7235 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 7236 */ 7237 /* op vA, vB, field@CCCC */ 7238 mov r0, rINST, lsr #12 @ r0<- B 7239 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7240 FETCH(r1, 1) @ r1<- field ref CCCC 7241 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 7242 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7243 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7244 cmp r0, #0 @ is resolved entry null? 7245 bne .LOP_IGET_OBJECT_VOLATILE_finish @ no, already resolved 72468: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7247 EXPORT_PC() @ resolve() could throw 7248 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7249 bl dvmResolveInstField @ r0<- resolved InstField ptr 7250 cmp r0, #0 7251 bne .LOP_IGET_OBJECT_VOLATILE_finish 7252 b common_exceptionThrown 7253 7254 7255/* ------------------------------ */ 7256 .balign 64 7257.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */ 7258/* File: armv5te/OP_IGET_WIDE_VOLATILE.S */ 7259/* File: armv5te/OP_IGET_WIDE.S */ 7260 /* 7261 * Wide 32-bit instance field get. 7262 */ 7263 /* iget-wide vA, vB, field@CCCC */ 7264 mov r0, rINST, lsr #12 @ r0<- B 7265 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7266 FETCH(r1, 1) @ r1<- field ref CCCC 7267 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 7268 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7269 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7270 cmp r0, #0 @ is resolved entry null? 
7271 bne .LOP_IGET_WIDE_VOLATILE_finish @ no, already resolved 72728: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7273 EXPORT_PC() @ resolve() could throw 7274 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7275 bl dvmResolveInstField @ r0<- resolved InstField ptr 7276 cmp r0, #0 7277 bne .LOP_IGET_WIDE_VOLATILE_finish 7278 b common_exceptionThrown 7279 7280 7281/* ------------------------------ */ 7282 .balign 64 7283.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */ 7284/* File: armv5te/OP_IPUT_WIDE_VOLATILE.S */ 7285/* File: armv5te/OP_IPUT_WIDE.S */ 7286 /* iput-wide vA, vB, field@CCCC */ 7287 mov r0, rINST, lsr #12 @ r0<- B 7288 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7289 FETCH(r1, 1) @ r1<- field ref CCCC 7290 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 7291 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7292 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7293 cmp r0, #0 @ is resolved entry null? 7294 bne .LOP_IPUT_WIDE_VOLATILE_finish @ no, already resolved 72958: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7296 EXPORT_PC() @ resolve() could throw 7297 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7298 bl dvmResolveInstField @ r0<- resolved InstField ptr 7299 cmp r0, #0 @ success? 7300 bne .LOP_IPUT_WIDE_VOLATILE_finish @ yes, finish up 7301 b common_exceptionThrown 7302 7303 7304/* ------------------------------ */ 7305 .balign 64 7306.L_OP_SGET_WIDE_VOLATILE: /* 0xea */ 7307/* File: armv5te/OP_SGET_WIDE_VOLATILE.S */ 7308/* File: armv5te/OP_SGET_WIDE.S */ 7309 /* 7310 * 64-bit SGET handler. 7311 */ 7312 /* sget-wide vAA, field@BBBB */ 7313 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7314 FETCH(r1, 1) @ r1<- field ref BBBB 7315 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7316 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7317 cmp r0, #0 @ is resolved entry null? 
7318 beq .LOP_SGET_WIDE_VOLATILE_resolve @ yes, do resolve 7319.LOP_SGET_WIDE_VOLATILE_finish: 7320 mov r9, rINST, lsr #8 @ r9<- AA 7321 .if 1 7322 add r0, r0, #offStaticField_value @ r0<- pointer to data 7323 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 7324 .else 7325 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 7326 .endif 7327 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 7328 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7329 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 7330 GET_INST_OPCODE(ip) @ extract opcode from rINST 7331 GOTO_OPCODE(ip) @ jump to next instruction 7332 7333 7334/* ------------------------------ */ 7335 .balign 64 7336.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */ 7337/* File: armv5te/OP_SPUT_WIDE_VOLATILE.S */ 7338/* File: armv5te/OP_SPUT_WIDE.S */ 7339 /* 7340 * 64-bit SPUT handler. 7341 */ 7342 /* sput-wide vAA, field@BBBB */ 7343 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 7344 FETCH(r1, 1) @ r1<- field ref BBBB 7345 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 7346 mov r9, rINST, lsr #8 @ r9<- AA 7347 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 7348 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 7349 cmp r2, #0 @ is resolved entry null? 
7350 beq .LOP_SPUT_WIDE_VOLATILE_resolve @ yes, do resolve 7351.LOP_SPUT_WIDE_VOLATILE_finish: @ field ptr in r2, AA in r9 7352 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7353 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 7354 GET_INST_OPCODE(r10) @ extract opcode from rINST 7355 .if 1 7356 add r2, r2, #offStaticField_value @ r2<- pointer to data 7357 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 7358 .else 7359 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 7360 .endif 7361 GOTO_OPCODE(r10) @ jump to next instruction 7362 7363 7364/* ------------------------------ */ 7365 .balign 64 7366.L_OP_BREAKPOINT: /* 0xec */ 7367/* File: armv5te/OP_BREAKPOINT.S */ 7368/* File: armv5te/unused.S */ 7369 bl common_abort 7370 7371 7372/* ------------------------------ */ 7373 .balign 64 7374.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */ 7375/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */ 7376 /* 7377 * Handle a throw-verification-error instruction. This throws an 7378 * exception for an error discovered during verification. The 7379 * exception is indicated by AA, with some detail provided by BBBB. 7380 */ 7381 /* op AA, ref@BBBB */ 7382 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7383 FETCH(r2, 1) @ r2<- BBBB 7384 EXPORT_PC() @ export the PC 7385 mov r1, rINST, lsr #8 @ r1<- AA 7386 bl dvmThrowVerificationError @ always throws 7387 b common_exceptionThrown @ handle exception 7388 7389/* ------------------------------ */ 7390 .balign 64 7391.L_OP_EXECUTE_INLINE: /* 0xee */ 7392/* File: armv5te/OP_EXECUTE_INLINE.S */ 7393 /* 7394 * Execute a "native inline" instruction. 7395 * 7396 * We need to call an InlineOp4Func: 7397 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7398 * 7399 * The first four args are in r0-r3, pointer to return value storage 7400 * is on the stack. The function's return value is a flag that tells 7401 * us if an exception was thrown. 
7402 */ 7403 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */ 7404 FETCH(r10, 1) @ r10<- BBBB 7405 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7406 EXPORT_PC() @ can throw 7407 sub sp, sp, #8 @ make room for arg, +64 bit align 7408 mov r0, rINST, lsr #12 @ r0<- B 7409 str r1, [sp] @ push &glue->retval 7410 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after 7411 add sp, sp, #8 @ pop stack 7412 cmp r0, #0 @ test boolean result of inline 7413 beq common_exceptionThrown @ returned false, handle exception 7414 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7415 GET_INST_OPCODE(ip) @ extract opcode from rINST 7416 GOTO_OPCODE(ip) @ jump to next instruction 7417 7418/* ------------------------------ */ 7419 .balign 64 7420.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */ 7421/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */ 7422 /* 7423 * Execute a "native inline" instruction, using "/range" semantics. 7424 * Same idea as execute-inline, but we get the args differently. 7425 * 7426 * We need to call an InlineOp4Func: 7427 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7428 * 7429 * The first four args are in r0-r3, pointer to return value storage 7430 * is on the stack. The function's return value is a flag that tells 7431 * us if an exception was thrown. 
7432 */ 7433 /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */ 7434 FETCH(r10, 1) @ r10<- BBBB 7435 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7436 EXPORT_PC() @ can throw 7437 sub sp, sp, #8 @ make room for arg, +64 bit align 7438 mov r0, rINST, lsr #8 @ r0<- AA 7439 str r1, [sp] @ push &glue->retval 7440 bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after 7441 add sp, sp, #8 @ pop stack 7442 cmp r0, #0 @ test boolean result of inline 7443 beq common_exceptionThrown @ returned false, handle exception 7444 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7445 GET_INST_OPCODE(ip) @ extract opcode from rINST 7446 GOTO_OPCODE(ip) @ jump to next instruction 7447 7448/* ------------------------------ */ 7449 .balign 64 7450.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7451/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7452 /* 7453 * invoke-direct-empty is a no-op in a "standard" interpreter. 7454 */ 7455 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7456 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7457 GOTO_OPCODE(ip) @ execute it 7458 7459/* ------------------------------ */ 7460 .balign 64 7461.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */ 7462/* File: armv5te/OP_RETURN_VOID_BARRIER.S */ 7463 SMP_DMB_ST 7464 b common_returnFromMethod 7465 7466/* ------------------------------ */ 7467 .balign 64 7468.L_OP_IGET_QUICK: /* 0xf2 */ 7469/* File: armv5te/OP_IGET_QUICK.S */ 7470 /* For: iget-quick, iget-object-quick */ 7471 /* op vA, vB, offset@CCCC */ 7472 mov r2, rINST, lsr #12 @ r2<- B 7473 GET_VREG(r3, r2) @ r3<- object we're operating on 7474 FETCH(r1, 1) @ r1<- field byte offset 7475 cmp r3, #0 @ check object for null 7476 mov r2, rINST, lsr #8 @ r2<- A(+) 7477 beq common_errNullObject @ object was null 7478 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7479 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7480 and r2, r2, #15 7481 GET_INST_OPCODE(ip) @ extract opcode from rINST 7482 SET_VREG(r0, r2) @ fp[A]<- r0 7483 
GOTO_OPCODE(ip) @ jump to next instruction 7484 7485/* ------------------------------ */ 7486 .balign 64 7487.L_OP_IGET_WIDE_QUICK: /* 0xf3 */ 7488/* File: armv5te/OP_IGET_WIDE_QUICK.S */ 7489 /* iget-wide-quick vA, vB, offset@CCCC */ 7490 mov r2, rINST, lsr #12 @ r2<- B 7491 GET_VREG(r3, r2) @ r3<- object we're operating on 7492 FETCH(ip, 1) @ ip<- field byte offset 7493 cmp r3, #0 @ check object for null 7494 mov r2, rINST, lsr #8 @ r2<- A(+) 7495 beq common_errNullObject @ object was null 7496 ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned) 7497 and r2, r2, #15 7498 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7499 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 7500 GET_INST_OPCODE(ip) @ extract opcode from rINST 7501 stmia r3, {r0-r1} @ fp[A]<- r0/r1 7502 GOTO_OPCODE(ip) @ jump to next instruction 7503 7504/* ------------------------------ */ 7505 .balign 64 7506.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */ 7507/* File: armv5te/OP_IGET_OBJECT_QUICK.S */ 7508/* File: armv5te/OP_IGET_QUICK.S */ 7509 /* For: iget-quick, iget-object-quick */ 7510 /* op vA, vB, offset@CCCC */ 7511 mov r2, rINST, lsr #12 @ r2<- B 7512 GET_VREG(r3, r2) @ r3<- object we're operating on 7513 FETCH(r1, 1) @ r1<- field byte offset 7514 cmp r3, #0 @ check object for null 7515 mov r2, rINST, lsr #8 @ r2<- A(+) 7516 beq common_errNullObject @ object was null 7517 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7518 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7519 and r2, r2, #15 7520 GET_INST_OPCODE(ip) @ extract opcode from rINST 7521 SET_VREG(r0, r2) @ fp[A]<- r0 7522 GOTO_OPCODE(ip) @ jump to next instruction 7523 7524 7525/* ------------------------------ */ 7526 .balign 64 7527.L_OP_IPUT_QUICK: /* 0xf5 */ 7528/* File: armv5te/OP_IPUT_QUICK.S */ 7529 /* For: iput-quick */ 7530 /* op vA, vB, offset@CCCC */ 7531 mov r2, rINST, lsr #12 @ r2<- B 7532 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7533 FETCH(r1, 1) @ r1<- field byte offset 7534 cmp r3, #0 @ check object for null 7535 mov r2, 
rINST, lsr #8 @ r2<- A(+) 7536 beq common_errNullObject @ object was null 7537 and r2, r2, #15 7538 GET_VREG(r0, r2) @ r0<- fp[A] 7539 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7540 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7541 GET_INST_OPCODE(ip) @ extract opcode from rINST 7542 GOTO_OPCODE(ip) @ jump to next instruction 7543 7544/* ------------------------------ */ 7545 .balign 64 7546.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7547/* File: armv5te/OP_IPUT_WIDE_QUICK.S */ 7548 /* iput-wide-quick vA, vB, offset@CCCC */ 7549 mov r0, rINST, lsr #8 @ r0<- A(+) 7550 mov r1, rINST, lsr #12 @ r1<- B 7551 and r0, r0, #15 7552 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7553 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7554 cmp r2, #0 @ check object for null 7555 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7556 beq common_errNullObject @ object was null 7557 FETCH(r3, 1) @ r3<- field byte offset 7558 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7559 strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1 7560 GET_INST_OPCODE(ip) @ extract opcode from rINST 7561 GOTO_OPCODE(ip) @ jump to next instruction 7562 7563/* ------------------------------ */ 7564 .balign 64 7565.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7566/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7567 /* For: iput-object-quick */ 7568 /* op vA, vB, offset@CCCC */ 7569 mov r2, rINST, lsr #12 @ r2<- B 7570 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7571 FETCH(r1, 1) @ r1<- field byte offset 7572 cmp r3, #0 @ check object for null 7573 mov r2, rINST, lsr #8 @ r2<- A(+) 7574 beq common_errNullObject @ object was null 7575 and r2, r2, #15 7576 GET_VREG(r0, r2) @ r0<- fp[A] 7577 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 7578 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7579 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7580 cmp r0, #0 7581 strneb r2, [r2, r3, lsr #GC_CARD_SHIFT] @ mark card on non-null store 7582 GET_INST_OPCODE(ip) @ extract opcode from rINST 7583 GOTO_OPCODE(ip) @ jump to 
next instruction 7584 7585/* ------------------------------ */ 7586 .balign 64 7587.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */ 7588/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7589 /* 7590 * Handle an optimized virtual method call. 7591 * 7592 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7593 */ 7594 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7595 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7596 FETCH(r3, 2) @ r3<- FEDC or CCCC 7597 FETCH(r1, 1) @ r1<- BBBB 7598 .if (!0) 7599 and r3, r3, #15 @ r3<- C (or stays CCCC) 7600 .endif 7601 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7602 cmp r2, #0 @ is "this" null? 7603 beq common_errNullObject @ null "this", throw exception 7604 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7605 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7606 EXPORT_PC() @ invoke must export 7607 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7608 bl common_invokeMethodNoRange @ continue on 7609 7610/* ------------------------------ */ 7611 .balign 64 7612.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */ 7613/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */ 7614/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7615 /* 7616 * Handle an optimized virtual method call. 7617 * 7618 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7619 */ 7620 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7621 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7622 FETCH(r3, 2) @ r3<- FEDC or CCCC 7623 FETCH(r1, 1) @ r1<- BBBB 7624 .if (!1) 7625 and r3, r3, #15 @ r3<- C (or stays CCCC) 7626 .endif 7627 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7628 cmp r2, #0 @ is "this" null? 
7629 beq common_errNullObject @ null "this", throw exception 7630 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7631 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7632 EXPORT_PC() @ invoke must export 7633 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7634 bl common_invokeMethodRange @ continue on 7635 7636 7637/* ------------------------------ */ 7638 .balign 64 7639.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */ 7640/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7641 /* 7642 * Handle an optimized "super" method call. 7643 * 7644 * for: [opt] invoke-super-quick, invoke-super-quick/range 7645 */ 7646 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7647 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7648 FETCH(r10, 2) @ r10<- GFED or CCCC 7649 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7650 .if (!0) 7651 and r10, r10, #15 @ r10<- D (or stays CCCC) 7652 .endif 7653 FETCH(r1, 1) @ r1<- BBBB 7654 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7655 EXPORT_PC() @ must export for invoke 7656 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7657 GET_VREG(r3, r10) @ r3<- "this" 7658 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7659 cmp r3, #0 @ null "this" ref? 7660 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7661 beq common_errNullObject @ "this" is null, throw exception 7662 bl common_invokeMethodNoRange @ continue on 7663 7664/* ------------------------------ */ 7665 .balign 64 7666.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */ 7667/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */ 7668/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7669 /* 7670 * Handle an optimized "super" method call. 
7671 * 7672 * for: [opt] invoke-super-quick, invoke-super-quick/range 7673 */ 7674 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7675 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7676 FETCH(r10, 2) @ r10<- GFED or CCCC 7677 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7678 .if (!1) 7679 and r10, r10, #15 @ r10<- D (or stays CCCC) 7680 .endif 7681 FETCH(r1, 1) @ r1<- BBBB 7682 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7683 EXPORT_PC() @ must export for invoke 7684 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7685 GET_VREG(r3, r10) @ r3<- "this" 7686 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7687 cmp r3, #0 @ null "this" ref? 7688 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7689 beq common_errNullObject @ "this" is null, throw exception 7690 bl common_invokeMethodRange @ continue on 7691 7692 7693/* ------------------------------ */ 7694 .balign 64 7695.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */ 7696/* File: armv5te/OP_IPUT_OBJECT_VOLATILE.S */ 7697/* File: armv5te/OP_IPUT_OBJECT.S */ 7698 /* 7699 * 32-bit instance field put. 7700 * 7701 * for: iput-object, iput-object-volatile 7702 */ 7703 /* op vA, vB, field@CCCC */ 7704 mov r0, rINST, lsr #12 @ r0<- B 7705 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7706 FETCH(r1, 1) @ r1<- field ref CCCC 7707 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 7708 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7709 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7710 cmp r0, #0 @ is resolved entry null? 7711 bne .LOP_IPUT_OBJECT_VOLATILE_finish @ no, already resolved 77128: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7713 EXPORT_PC() @ resolve() could throw 7714 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7715 bl dvmResolveInstField @ r0<- resolved InstField ptr 7716 cmp r0, #0 @ success? 
7717 bne .LOP_IPUT_OBJECT_VOLATILE_finish @ yes, finish up 7718 b common_exceptionThrown 7719 7720 7721/* ------------------------------ */ 7722 .balign 64 7723.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */ 7724/* File: armv5te/OP_SGET_OBJECT_VOLATILE.S */ 7725/* File: armv5te/OP_SGET.S */ 7726 /* 7727 * General 32-bit SGET handler. 7728 * 7729 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 7730 */ 7731 /* op vAA, field@BBBB */ 7732 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7733 FETCH(r1, 1) @ r1<- field ref BBBB 7734 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7735 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7736 cmp r0, #0 @ is resolved entry null? 7737 beq .LOP_SGET_OBJECT_VOLATILE_resolve @ yes, do resolve 7738.LOP_SGET_OBJECT_VOLATILE_finish: @ field ptr in r0 7739 ldr r1, [r0, #offStaticField_value] @ r1<- field value 7740 SMP_DMB @ acquiring load 7741 mov r2, rINST, lsr #8 @ r2<- AA 7742 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7743 SET_VREG(r1, r2) @ fp[AA]<- r1 7744 GET_INST_OPCODE(ip) @ extract opcode from rINST 7745 GOTO_OPCODE(ip) @ jump to next instruction 7746 7747 7748/* ------------------------------ */ 7749 .balign 64 7750.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */ 7751/* File: armv5te/OP_SPUT_OBJECT_VOLATILE.S */ 7752/* File: armv5te/OP_SPUT_OBJECT.S */ 7753 /* 7754 * 32-bit SPUT handler for objects 7755 * 7756 * for: sput-object, sput-object-volatile 7757 */ 7758 /* op vAA, field@BBBB */ 7759 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7760 FETCH(r1, 1) @ r1<- field ref BBBB 7761 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7762 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7763 cmp r0, #0 @ is resolved entry null? 
7764 bne .LOP_SPUT_OBJECT_VOLATILE_finish @ no, continue 7765 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7766 EXPORT_PC() @ resolve() could throw, so export now 7767 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7768 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 7769 cmp r0, #0 @ success? 7770 bne .LOP_SPUT_OBJECT_VOLATILE_finish @ yes, finish 7771 b common_exceptionThrown @ no, handle exception 7772 7773 7774 7775/* ------------------------------ */ 7776 .balign 64 7777.L_OP_UNUSED_FF: /* 0xff */ 7778/* File: armv5te/OP_UNUSED_FF.S */ 7779/* File: armv5te/unused.S */ 7780 bl common_abort 7781 7782 7783 7784 .balign 64 7785 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 7786 .global dvmAsmInstructionEnd 7787dvmAsmInstructionEnd: 7788 7789/* 7790 * =========================================================================== 7791 * Sister implementations 7792 * =========================================================================== 7793 */ 7794 .global dvmAsmSisterStart 7795 .type dvmAsmSisterStart, %function 7796 .text 7797 .balign 4 7798dvmAsmSisterStart: 7799 7800/* continuation for OP_CONST_STRING */ 7801 7802 /* 7803 * Continuation if the String has not yet been resolved. 7804 * r1: BBBB (String ref) 7805 * r9: target register 7806 */ 7807.LOP_CONST_STRING_resolve: 7808 EXPORT_PC() 7809 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7810 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7811 bl dvmResolveString @ r0<- String reference 7812 cmp r0, #0 @ failed? 7813 beq common_exceptionThrown @ yup, handle the exception 7814 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7815 GET_INST_OPCODE(ip) @ extract opcode from rINST 7816 SET_VREG(r0, r9) @ vAA<- r0 7817 GOTO_OPCODE(ip) @ jump to next instruction 7818 7819/* continuation for OP_CONST_STRING_JUMBO */ 7820 7821 /* 7822 * Continuation if the String has not yet been resolved. 
7823 * r1: BBBBBBBB (String ref) 7824 * r9: target register 7825 */ 7826.LOP_CONST_STRING_JUMBO_resolve: 7827 EXPORT_PC() 7828 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7829 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7830 bl dvmResolveString @ r0<- String reference 7831 cmp r0, #0 @ failed? 7832 beq common_exceptionThrown @ yup, handle the exception 7833 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7834 GET_INST_OPCODE(ip) @ extract opcode from rINST 7835 SET_VREG(r0, r9) @ vAA<- r0 7836 GOTO_OPCODE(ip) @ jump to next instruction 7837 7838/* continuation for OP_CONST_CLASS */ 7839 7840 /* 7841 * Continuation if the Class has not yet been resolved. 7842 * r1: BBBB (Class ref) 7843 * r9: target register 7844 */ 7845.LOP_CONST_CLASS_resolve: 7846 EXPORT_PC() 7847 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7848 mov r2, #1 @ r2<- true 7849 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7850 bl dvmResolveClass @ r0<- Class reference 7851 cmp r0, #0 @ failed? 7852 beq common_exceptionThrown @ yup, handle the exception 7853 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7854 GET_INST_OPCODE(ip) @ extract opcode from rINST 7855 SET_VREG(r0, r9) @ vAA<- r0 7856 GOTO_OPCODE(ip) @ jump to next instruction 7857 7858/* continuation for OP_CHECK_CAST */ 7859 7860 /* 7861 * Trivial test failed, need to perform full check. This is common. 7862 * r0 holds obj->clazz 7863 * r1 holds class resolved from BBBB 7864 * r9 holds object 7865 */ 7866.LOP_CHECK_CAST_fullcheck: 7867 bl dvmInstanceofNonTrivial @ r0<- boolean result 7868 cmp r0, #0 @ failed? 7869 bne .LOP_CHECK_CAST_okay @ no, success 7870 7871 @ A cast has failed. We need to throw a ClassCastException with the 7872 @ class of the object that failed to be cast. 
7873 EXPORT_PC() @ about to throw 7874 ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz 7875 ldr r0, .LstrClassCastExceptionPtr 7876 ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor 7877 bl dvmThrowExceptionWithClassMessage 7878 b common_exceptionThrown 7879 7880 /* 7881 * Resolution required. This is the least-likely path. 7882 * 7883 * r2 holds BBBB 7884 * r9 holds object 7885 */ 7886.LOP_CHECK_CAST_resolve: 7887 EXPORT_PC() @ resolve() could throw 7888 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7889 mov r1, r2 @ r1<- BBBB 7890 mov r2, #0 @ r2<- false 7891 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7892 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7893 cmp r0, #0 @ got null? 7894 beq common_exceptionThrown @ yes, handle exception 7895 mov r1, r0 @ r1<- class resolved from BBB 7896 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 7897 b .LOP_CHECK_CAST_resolved @ pick up where we left off 7898 7899.LstrClassCastExceptionPtr: 7900 .word .LstrClassCastException 7901 7902/* continuation for OP_INSTANCE_OF */ 7903 7904 /* 7905 * Trivial test failed, need to perform full check. This is common. 7906 * r0 holds obj->clazz 7907 * r1 holds class resolved from BBBB 7908 * r9 holds A 7909 */ 7910.LOP_INSTANCE_OF_fullcheck: 7911 bl dvmInstanceofNonTrivial @ r0<- boolean result 7912 @ fall through to OP_INSTANCE_OF_store 7913 7914 /* 7915 * r0 holds boolean result 7916 * r9 holds A 7917 */ 7918.LOP_INSTANCE_OF_store: 7919 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7920 SET_VREG(r0, r9) @ vA<- r0 7921 GET_INST_OPCODE(ip) @ extract opcode from rINST 7922 GOTO_OPCODE(ip) @ jump to next instruction 7923 7924 /* 7925 * Trivial test succeeded, save and bail. 
7926 * r9 holds A 7927 */ 7928.LOP_INSTANCE_OF_trivial: 7929 mov r0, #1 @ indicate success 7930 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper 7931 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7932 SET_VREG(r0, r9) @ vA<- r0 7933 GET_INST_OPCODE(ip) @ extract opcode from rINST 7934 GOTO_OPCODE(ip) @ jump to next instruction 7935 7936 /* 7937 * Resolution required. This is the least-likely path. 7938 * 7939 * r3 holds BBBB 7940 * r9 holds A 7941 */ 7942.LOP_INSTANCE_OF_resolve: 7943 EXPORT_PC() @ resolve() could throw 7944 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7945 mov r1, r3 @ r1<- BBBB 7946 mov r2, #1 @ r2<- true 7947 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7948 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7949 cmp r0, #0 @ got null? 7950 beq common_exceptionThrown @ yes, handle exception 7951 mov r1, r0 @ r1<- class resolved from BBB 7952 mov r3, rINST, lsr #12 @ r3<- B 7953 GET_VREG(r0, r3) @ r0<- vB (object) 7954 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 7955 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 7956 7957/* continuation for OP_NEW_INSTANCE */ 7958 7959 .balign 32 @ minimize cache lines 7960.LOP_NEW_INSTANCE_finish: @ r0=new object 7961 mov r3, rINST, lsr #8 @ r3<- AA 7962 cmp r0, #0 @ failed? 7963 beq common_exceptionThrown @ yes, handle the exception 7964 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7965 GET_INST_OPCODE(ip) @ extract opcode from rINST 7966 SET_VREG(r0, r3) @ vAA<- r0 7967 GOTO_OPCODE(ip) @ jump to next instruction 7968 7969 /* 7970 * Class initialization required. 7971 * 7972 * r0 holds class object 7973 */ 7974.LOP_NEW_INSTANCE_needinit: 7975 mov r9, r0 @ save r0 7976 bl dvmInitClass @ initialize class 7977 cmp r0, #0 @ check boolean result 7978 mov r0, r9 @ restore r0 7979 bne .LOP_NEW_INSTANCE_initialized @ success, continue 7980 b common_exceptionThrown @ failed, deal with init exception 7981 7982 /* 7983 * Resolution required. 
This is the least-likely path. 7984 * 7985 * r1 holds BBBB 7986 */ 7987.LOP_NEW_INSTANCE_resolve: 7988 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7989 mov r2, #0 @ r2<- false 7990 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7991 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7992 cmp r0, #0 @ got null? 7993 bne .LOP_NEW_INSTANCE_resolved @ no, continue 7994 b common_exceptionThrown @ yes, handle exception 7995 7996.LstrInstantiationErrorPtr: 7997 .word .LstrInstantiationError 7998 7999/* continuation for OP_NEW_ARRAY */ 8000 8001 8002 /* 8003 * Resolve class. (This is an uncommon case.) 8004 * 8005 * r1 holds array length 8006 * r2 holds class ref CCCC 8007 */ 8008.LOP_NEW_ARRAY_resolve: 8009 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8010 mov r9, r1 @ r9<- length (save) 8011 mov r1, r2 @ r1<- CCCC 8012 mov r2, #0 @ r2<- false 8013 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8014 bl dvmResolveClass @ r0<- call(clazz, ref) 8015 cmp r0, #0 @ got null? 8016 mov r1, r9 @ r1<- length (restore) 8017 beq common_exceptionThrown @ yes, handle exception 8018 @ fall through to OP_NEW_ARRAY_finish 8019 8020 /* 8021 * Finish allocation. 8022 * 8023 * r0 holds class 8024 * r1 holds array length 8025 */ 8026.LOP_NEW_ARRAY_finish: 8027 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 8028 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 8029 cmp r0, #0 @ failed? 
8030 mov r2, rINST, lsr #8 @ r2<- A+ 8031 beq common_exceptionThrown @ yes, handle the exception 8032 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8033 and r2, r2, #15 @ r2<- A 8034 GET_INST_OPCODE(ip) @ extract opcode from rINST 8035 SET_VREG(r0, r2) @ vA<- r0 8036 GOTO_OPCODE(ip) @ jump to next instruction 8037 8038/* continuation for OP_FILLED_NEW_ARRAY */ 8039 8040 /* 8041 * On entry: 8042 * r0 holds array class 8043 * r10 holds AA or BA 8044 */ 8045.LOP_FILLED_NEW_ARRAY_continue: 8046 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8047 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8048 ldrb rINST, [r3, #1] @ rINST<- descriptor[1] 8049 .if 0 8050 mov r1, r10 @ r1<- AA (length) 8051 .else 8052 mov r1, r10, lsr #4 @ r1<- B (length) 8053 .endif 8054 cmp rINST, #'I' @ array of ints? 8055 cmpne rINST, #'L' @ array of objects? 8056 cmpne rINST, #'[' @ array of arrays? 8057 mov r9, r1 @ save length in r9 8058 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 8059 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8060 cmp r0, #0 @ null return? 8061 beq common_exceptionThrown @ alloc failed, handle exception 8062 8063 FETCH(r1, 2) @ r1<- FEDC or CCCC 8064 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8065 str rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type 8066 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8067 subs r9, r9, #1 @ length--, check for neg 8068 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8069 bmi 2f @ was zero, bail 8070 8071 @ copy values from registers into the array 8072 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8073 .if 0 8074 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 80751: ldr r3, [r2], #4 @ r3<- *r2++ 8076 subs r9, r9, #1 @ count-- 8077 str r3, [r0], #4 @ *contents++ = vX 8078 bpl 1b 8079 @ continue at 2 8080 .else 8081 cmp r9, #4 @ length was initially 5? 
8082 and r2, r10, #15 @ r2<- A 8083 bne 1f @ <= 4 args, branch 8084 GET_VREG(r3, r2) @ r3<- vA 8085 sub r9, r9, #1 @ count-- 8086 str r3, [r0, #16] @ contents[4] = vA 80871: and r2, r1, #15 @ r2<- F/E/D/C 8088 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8089 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8090 subs r9, r9, #1 @ count-- 8091 str r3, [r0], #4 @ *contents++ = vX 8092 bpl 1b 8093 @ continue at 2 8094 .endif 8095 80962: 8097 ldr r0, [rGLUE, #offGlue_retval] @ r0<- object 8098 ldr r1, [rGLUE, #offGlue_retval+4] @ r1<- type 8099 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8100 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8101 cmp r1, #'I' @ Is int array? 8102 strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card if not 8103 GOTO_OPCODE(ip) @ execute it 8104 8105 /* 8106 * Throw an exception indicating that we have not implemented this 8107 * mode of filled-new-array. 8108 */ 8109.LOP_FILLED_NEW_ARRAY_notimpl: 8110 ldr r0, .L_strInternalError 8111 ldr r1, .L_strFilledNewArrayNotImpl 8112 bl dvmThrowException 8113 b common_exceptionThrown 8114 8115 .if (!0) @ define in one or the other, not both 8116.L_strFilledNewArrayNotImpl: 8117 .word .LstrFilledNewArrayNotImpl 8118.L_strInternalError: 8119 .word .LstrInternalError 8120 .endif 8121 8122/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8123 8124 /* 8125 * On entry: 8126 * r0 holds array class 8127 * r10 holds AA or BA 8128 */ 8129.LOP_FILLED_NEW_ARRAY_RANGE_continue: 8130 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8131 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8132 ldrb rINST, [r3, #1] @ rINST<- descriptor[1] 8133 .if 1 8134 mov r1, r10 @ r1<- AA (length) 8135 .else 8136 mov r1, r10, lsr #4 @ r1<- B (length) 8137 .endif 8138 cmp rINST, #'I' @ array of ints? 8139 cmpne rINST, #'L' @ array of objects? 8140 cmpne rINST, #'[' @ array of arrays? 
8141 mov r9, r1 @ save length in r9 8142 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8143 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8144 cmp r0, #0 @ null return? 8145 beq common_exceptionThrown @ alloc failed, handle exception 8146 8147 FETCH(r1, 2) @ r1<- FEDC or CCCC 8148 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8149 str rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type 8150 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8151 subs r9, r9, #1 @ length--, check for neg 8152 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8153 bmi 2f @ was zero, bail 8154 8155 @ copy values from registers into the array 8156 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8157 .if 1 8158 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 81591: ldr r3, [r2], #4 @ r3<- *r2++ 8160 subs r9, r9, #1 @ count-- 8161 str r3, [r0], #4 @ *contents++ = vX 8162 bpl 1b 8163 @ continue at 2 8164 .else 8165 cmp r9, #4 @ length was initially 5? 8166 and r2, r10, #15 @ r2<- A 8167 bne 1f @ <= 4 args, branch 8168 GET_VREG(r3, r2) @ r3<- vA 8169 sub r9, r9, #1 @ count-- 8170 str r3, [r0, #16] @ contents[4] = vA 81711: and r2, r1, #15 @ r2<- F/E/D/C 8172 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8173 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8174 subs r9, r9, #1 @ count-- 8175 str r3, [r0], #4 @ *contents++ = vX 8176 bpl 1b 8177 @ continue at 2 8178 .endif 8179 81802: 8181 ldr r0, [rGLUE, #offGlue_retval] @ r0<- object 8182 ldr r1, [rGLUE, #offGlue_retval+4] @ r1<- type 8183 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8184 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8185 cmp r1, #'I' @ Is int array? 8186 strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card if not 8187 GOTO_OPCODE(ip) @ execute it 8188 8189 /* 8190 * Throw an exception indicating that we have not implemented this 8191 * mode of filled-new-array. 
8192 */ 8193.LOP_FILLED_NEW_ARRAY_RANGE_notimpl: 8194 ldr r0, .L_strInternalError 8195 ldr r1, .L_strFilledNewArrayNotImpl 8196 bl dvmThrowException 8197 b common_exceptionThrown 8198 8199 .if (!1) @ define in one or the other, not both 8200.L_strFilledNewArrayNotImpl: 8201 .word .LstrFilledNewArrayNotImpl 8202.L_strInternalError: 8203 .word .LstrInternalError 8204 .endif 8205 8206/* continuation for OP_CMPL_FLOAT */ 8207.LOP_CMPL_FLOAT_finish: 8208 SET_VREG(r0, r9) @ vAA<- r0 8209 GOTO_OPCODE(ip) @ jump to next instruction 8210 8211/* continuation for OP_CMPG_FLOAT */ 8212.LOP_CMPG_FLOAT_finish: 8213 SET_VREG(r0, r9) @ vAA<- r0 8214 GOTO_OPCODE(ip) @ jump to next instruction 8215 8216/* continuation for OP_CMPL_DOUBLE */ 8217.LOP_CMPL_DOUBLE_finish: 8218 SET_VREG(r0, r9) @ vAA<- r0 8219 GOTO_OPCODE(ip) @ jump to next instruction 8220 8221/* continuation for OP_CMPG_DOUBLE */ 8222.LOP_CMPG_DOUBLE_finish: 8223 SET_VREG(r0, r9) @ vAA<- r0 8224 GOTO_OPCODE(ip) @ jump to next instruction 8225 8226/* continuation for OP_CMP_LONG */ 8227 8228.LOP_CMP_LONG_less: 8229 mvn r1, #0 @ r1<- -1 8230 @ Want to cond code the next mov so we can avoid branch, but don't see it; 8231 @ instead, we just replicate the tail end. 
8232 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8233 SET_VREG(r1, r9) @ vAA<- r1 8234 GET_INST_OPCODE(ip) @ extract opcode from rINST 8235 GOTO_OPCODE(ip) @ jump to next instruction 8236 8237.LOP_CMP_LONG_greater: 8238 mov r1, #1 @ r1<- 1 8239 @ fall through to _finish 8240 8241.LOP_CMP_LONG_finish: 8242 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8243 SET_VREG(r1, r9) @ vAA<- r1 8244 GET_INST_OPCODE(ip) @ extract opcode from rINST 8245 GOTO_OPCODE(ip) @ jump to next instruction 8246 8247/* continuation for OP_AGET_WIDE */ 8248 8249.LOP_AGET_WIDE_finish: 8250 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8251 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8252 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8253 GET_INST_OPCODE(ip) @ extract opcode from rINST 8254 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8255 GOTO_OPCODE(ip) @ jump to next instruction 8256 8257/* continuation for OP_APUT_WIDE */ 8258 8259.LOP_APUT_WIDE_finish: 8260 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8261 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8262 GET_INST_OPCODE(ip) @ extract opcode from rINST 8263 strd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8264 GOTO_OPCODE(ip) @ jump to next instruction 8265 8266/* continuation for OP_APUT_OBJECT */ 8267 /* 8268 * On entry: 8269 * r1 = vBB (arrayObj) 8270 * r9 = vAA (obj) 8271 * r10 = offset into array (vBB + vCC * width) 8272 */ 8273.LOP_APUT_OBJECT_finish: 8274 cmp r9, #0 @ storing null reference? 8275 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8276 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8277 ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz 8278 bl dvmCanPutArrayElement @ test object type vs. array type 8279 cmp r0, #0 @ okay? 
8280 beq common_errArrayStore @ no 8281 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8282 ldr r2, [rGLUE, #offGlue_cardTable] @ get biased CT base 8283 add r10, #offArrayObject_contents @ r0<- pointer to slot 8284 GET_INST_OPCODE(ip) @ extract opcode from rINST 8285 str r9, [r10] @ vBB[vCC]<- vAA 8286 strb r2, [r2, r10, lsr #GC_CARD_SHIFT] @ mark card 8287 GOTO_OPCODE(ip) @ jump to next instruction 8288.LOP_APUT_OBJECT_skip_check: 8289 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8290 GET_INST_OPCODE(ip) @ extract opcode from rINST 8291 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8292 GOTO_OPCODE(ip) @ jump to next instruction 8293 8294/* continuation for OP_IGET */ 8295 8296 /* 8297 * Currently: 8298 * r0 holds resolved field 8299 * r9 holds object 8300 */ 8301.LOP_IGET_finish: 8302 @bl common_squeak0 8303 cmp r9, #0 @ check object for null 8304 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8305 beq common_errNullObject @ object was null 8306 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8307 @ no-op @ acquiring load 8308 mov r2, rINST, lsr #8 @ r2<- A+ 8309 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8310 and r2, r2, #15 @ r2<- A 8311 GET_INST_OPCODE(ip) @ extract opcode from rINST 8312 SET_VREG(r0, r2) @ fp[A]<- r0 8313 GOTO_OPCODE(ip) @ jump to next instruction 8314 8315/* continuation for OP_IGET_WIDE */ 8316 8317 /* 8318 * Currently: 8319 * r0 holds resolved field 8320 * r9 holds object 8321 */ 8322.LOP_IGET_WIDE_finish: 8323 cmp r9, #0 @ check object for null 8324 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8325 beq common_errNullObject @ object was null 8326 .if 0 8327 add r0, r9, r3 @ r0<- address of field 8328 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 8329 .else 8330 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 8331 .endif 8332 mov r2, rINST, lsr #8 @ r2<- A+ 8333 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8334 and r2, r2, #15 @ r2<- A 8335 add r3, rFP, r2, lsl 
#2 @ r3<- &fp[A] 8336 GET_INST_OPCODE(ip) @ extract opcode from rINST 8337 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8338 GOTO_OPCODE(ip) @ jump to next instruction 8339 8340/* continuation for OP_IGET_OBJECT */ 8341 8342 /* 8343 * Currently: 8344 * r0 holds resolved field 8345 * r9 holds object 8346 */ 8347.LOP_IGET_OBJECT_finish: 8348 @bl common_squeak0 8349 cmp r9, #0 @ check object for null 8350 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8351 beq common_errNullObject @ object was null 8352 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8353 @ no-op @ acquiring load 8354 mov r2, rINST, lsr #8 @ r2<- A+ 8355 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8356 and r2, r2, #15 @ r2<- A 8357 GET_INST_OPCODE(ip) @ extract opcode from rINST 8358 SET_VREG(r0, r2) @ fp[A]<- r0 8359 GOTO_OPCODE(ip) @ jump to next instruction 8360 8361/* continuation for OP_IGET_BOOLEAN */ 8362 8363 /* 8364 * Currently: 8365 * r0 holds resolved field 8366 * r9 holds object 8367 */ 8368.LOP_IGET_BOOLEAN_finish: 8369 @bl common_squeak1 8370 cmp r9, #0 @ check object for null 8371 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8372 beq common_errNullObject @ object was null 8373 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8374 @ no-op @ acquiring load 8375 mov r2, rINST, lsr #8 @ r2<- A+ 8376 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8377 and r2, r2, #15 @ r2<- A 8378 GET_INST_OPCODE(ip) @ extract opcode from rINST 8379 SET_VREG(r0, r2) @ fp[A]<- r0 8380 GOTO_OPCODE(ip) @ jump to next instruction 8381 8382/* continuation for OP_IGET_BYTE */ 8383 8384 /* 8385 * Currently: 8386 * r0 holds resolved field 8387 * r9 holds object 8388 */ 8389.LOP_IGET_BYTE_finish: 8390 @bl common_squeak2 8391 cmp r9, #0 @ check object for null 8392 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8393 beq common_errNullObject @ object was null 8394 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8395 @ no-op @ acquiring load 8396 mov r2, rINST, 
lsr #8 @ r2<- A+ 8397 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8398 and r2, r2, #15 @ r2<- A 8399 GET_INST_OPCODE(ip) @ extract opcode from rINST 8400 SET_VREG(r0, r2) @ fp[A]<- r0 8401 GOTO_OPCODE(ip) @ jump to next instruction 8402 8403/* continuation for OP_IGET_CHAR */ 8404 8405 /* 8406 * Currently: 8407 * r0 holds resolved field 8408 * r9 holds object 8409 */ 8410.LOP_IGET_CHAR_finish: 8411 @bl common_squeak3 8412 cmp r9, #0 @ check object for null 8413 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8414 beq common_errNullObject @ object was null 8415 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8416 @ no-op @ acquiring load 8417 mov r2, rINST, lsr #8 @ r2<- A+ 8418 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8419 and r2, r2, #15 @ r2<- A 8420 GET_INST_OPCODE(ip) @ extract opcode from rINST 8421 SET_VREG(r0, r2) @ fp[A]<- r0 8422 GOTO_OPCODE(ip) @ jump to next instruction 8423 8424/* continuation for OP_IGET_SHORT */ 8425 8426 /* 8427 * Currently: 8428 * r0 holds resolved field 8429 * r9 holds object 8430 */ 8431.LOP_IGET_SHORT_finish: 8432 @bl common_squeak4 8433 cmp r9, #0 @ check object for null 8434 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8435 beq common_errNullObject @ object was null 8436 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8437 @ no-op @ acquiring load 8438 mov r2, rINST, lsr #8 @ r2<- A+ 8439 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8440 and r2, r2, #15 @ r2<- A 8441 GET_INST_OPCODE(ip) @ extract opcode from rINST 8442 SET_VREG(r0, r2) @ fp[A]<- r0 8443 GOTO_OPCODE(ip) @ jump to next instruction 8444 8445/* continuation for OP_IPUT */ 8446 8447 /* 8448 * Currently: 8449 * r0 holds resolved field 8450 * r9 holds object 8451 */ 8452.LOP_IPUT_finish: 8453 @bl common_squeak0 8454 mov r1, rINST, lsr #8 @ r1<- A+ 8455 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8456 and r1, r1, #15 @ r1<- A 8457 cmp r9, #0 @ check object for null 8458 GET_VREG(r0, r1) @ 
r0<- fp[A] 8459 beq common_errNullObject @ object was null 8460 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8461 GET_INST_OPCODE(ip) @ extract opcode from rINST 8462 @ no-op @ releasing store 8463 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8464 GOTO_OPCODE(ip) @ jump to next instruction 8465 8466/* continuation for OP_IPUT_WIDE */ 8467 8468 /* 8469 * Currently: 8470 * r0 holds resolved field 8471 * r9 holds object 8472 */ 8473.LOP_IPUT_WIDE_finish: 8474 mov r2, rINST, lsr #8 @ r2<- A+ 8475 cmp r9, #0 @ check object for null 8476 and r2, r2, #15 @ r2<- A 8477 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8478 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8479 beq common_errNullObject @ object was null 8480 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8481 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8482 GET_INST_OPCODE(r10) @ extract opcode from rINST 8483 .if 0 8484 add r2, r9, r3 @ r2<- target address 8485 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 8486 .else 8487 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1 8488 .endif 8489 GOTO_OPCODE(r10) @ jump to next instruction 8490 8491/* continuation for OP_IPUT_OBJECT */ 8492 8493 /* 8494 * Currently: 8495 * r0 holds resolved field 8496 * r9 holds object 8497 */ 8498.LOP_IPUT_OBJECT_finish: 8499 @bl common_squeak0 8500 mov r1, rINST, lsr #8 @ r1<- A+ 8501 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8502 and r1, r1, #15 @ r1<- A 8503 cmp r9, #0 @ check object for null 8504 GET_VREG(r0, r1) @ r0<- fp[A] 8505 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8506 beq common_errNullObject @ object was null 8507 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8508 add r9, r3 @ r9<- direct ptr to target location 8509 GET_INST_OPCODE(ip) @ extract opcode from rINST 8510 @ no-op @ releasing store 8511 str r0, [r9] @ obj.field (8/16/32 bits)<- r0 8512 cmp r0, #0 @ stored a null reference? 
8513 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not 8514 GOTO_OPCODE(ip) @ jump to next instruction 8515 8516/* continuation for OP_IPUT_BOOLEAN */ 8517 8518 /* 8519 * Currently: 8520 * r0 holds resolved field 8521 * r9 holds object 8522 */ 8523.LOP_IPUT_BOOLEAN_finish: 8524 @bl common_squeak1 8525 mov r1, rINST, lsr #8 @ r1<- A+ 8526 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8527 and r1, r1, #15 @ r1<- A 8528 cmp r9, #0 @ check object for null 8529 GET_VREG(r0, r1) @ r0<- fp[A] 8530 beq common_errNullObject @ object was null 8531 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8532 GET_INST_OPCODE(ip) @ extract opcode from rINST 8533 @ no-op @ releasing store 8534 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8535 GOTO_OPCODE(ip) @ jump to next instruction 8536 8537/* continuation for OP_IPUT_BYTE */ 8538 8539 /* 8540 * Currently: 8541 * r0 holds resolved field 8542 * r9 holds object 8543 */ 8544.LOP_IPUT_BYTE_finish: 8545 @bl common_squeak2 8546 mov r1, rINST, lsr #8 @ r1<- A+ 8547 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8548 and r1, r1, #15 @ r1<- A 8549 cmp r9, #0 @ check object for null 8550 GET_VREG(r0, r1) @ r0<- fp[A] 8551 beq common_errNullObject @ object was null 8552 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8553 GET_INST_OPCODE(ip) @ extract opcode from rINST 8554 @ no-op @ releasing store 8555 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8556 GOTO_OPCODE(ip) @ jump to next instruction 8557 8558/* continuation for OP_IPUT_CHAR */ 8559 8560 /* 8561 * Currently: 8562 * r0 holds resolved field 8563 * r9 holds object 8564 */ 8565.LOP_IPUT_CHAR_finish: 8566 @bl common_squeak3 8567 mov r1, rINST, lsr #8 @ r1<- A+ 8568 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8569 and r1, r1, #15 @ r1<- A 8570 cmp r9, #0 @ check object for null 8571 GET_VREG(r0, r1) @ r0<- fp[A] 8572 beq common_errNullObject @ object was null 8573 FETCH_ADVANCE_INST(2) @ advance rPC, load 
rINST 8574 GET_INST_OPCODE(ip) @ extract opcode from rINST 8575 @ no-op @ releasing store 8576 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8577 GOTO_OPCODE(ip) @ jump to next instruction 8578 8579/* continuation for OP_IPUT_SHORT */ 8580 8581 /* 8582 * Currently: 8583 * r0 holds resolved field 8584 * r9 holds object 8585 */ 8586.LOP_IPUT_SHORT_finish: 8587 @bl common_squeak4 8588 mov r1, rINST, lsr #8 @ r1<- A+ 8589 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8590 and r1, r1, #15 @ r1<- A 8591 cmp r9, #0 @ check object for null 8592 GET_VREG(r0, r1) @ r0<- fp[A] 8593 beq common_errNullObject @ object was null 8594 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8595 GET_INST_OPCODE(ip) @ extract opcode from rINST 8596 @ no-op @ releasing store 8597 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8598 GOTO_OPCODE(ip) @ jump to next instruction 8599 8600/* continuation for OP_SGET */ 8601 8602 /* 8603 * Continuation if the field has not yet been resolved. 8604 * r1: BBBB field ref 8605 */ 8606.LOP_SGET_resolve: 8607 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8608 EXPORT_PC() @ resolve() could throw, so export now 8609 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8610 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8611 cmp r0, #0 @ success? 8612 bne .LOP_SGET_finish @ yes, finish 8613 b common_exceptionThrown @ no, handle exception 8614 8615/* continuation for OP_SGET_WIDE */ 8616 8617 /* 8618 * Continuation if the field has not yet been resolved. 8619 * r1: BBBB field ref 8620 * 8621 * Returns StaticField pointer in r0. 8622 */ 8623.LOP_SGET_WIDE_resolve: 8624 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8625 EXPORT_PC() @ resolve() could throw, so export now 8626 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8627 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8628 cmp r0, #0 @ success? 
8629 bne .LOP_SGET_WIDE_finish @ yes, finish 8630 b common_exceptionThrown @ no, handle exception 8631 8632/* continuation for OP_SGET_OBJECT */ 8633 8634 /* 8635 * Continuation if the field has not yet been resolved. 8636 * r1: BBBB field ref 8637 */ 8638.LOP_SGET_OBJECT_resolve: 8639 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8640 EXPORT_PC() @ resolve() could throw, so export now 8641 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8642 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8643 cmp r0, #0 @ success? 8644 bne .LOP_SGET_OBJECT_finish @ yes, finish 8645 b common_exceptionThrown @ no, handle exception 8646 8647/* continuation for OP_SGET_BOOLEAN */ 8648 8649 /* 8650 * Continuation if the field has not yet been resolved. 8651 * r1: BBBB field ref 8652 */ 8653.LOP_SGET_BOOLEAN_resolve: 8654 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8655 EXPORT_PC() @ resolve() could throw, so export now 8656 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8657 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8658 cmp r0, #0 @ success? 8659 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 8660 b common_exceptionThrown @ no, handle exception 8661 8662/* continuation for OP_SGET_BYTE */ 8663 8664 /* 8665 * Continuation if the field has not yet been resolved. 8666 * r1: BBBB field ref 8667 */ 8668.LOP_SGET_BYTE_resolve: 8669 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8670 EXPORT_PC() @ resolve() could throw, so export now 8671 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8672 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8673 cmp r0, #0 @ success? 8674 bne .LOP_SGET_BYTE_finish @ yes, finish 8675 b common_exceptionThrown @ no, handle exception 8676 8677/* continuation for OP_SGET_CHAR */ 8678 8679 /* 8680 * Continuation if the field has not yet been resolved. 
8681 * r1: BBBB field ref 8682 */ 8683.LOP_SGET_CHAR_resolve: 8684 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8685 EXPORT_PC() @ resolve() could throw, so export now 8686 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8687 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8688 cmp r0, #0 @ success? 8689 bne .LOP_SGET_CHAR_finish @ yes, finish 8690 b common_exceptionThrown @ no, handle exception 8691 8692/* continuation for OP_SGET_SHORT */ 8693 8694 /* 8695 * Continuation if the field has not yet been resolved. 8696 * r1: BBBB field ref 8697 */ 8698.LOP_SGET_SHORT_resolve: 8699 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8700 EXPORT_PC() @ resolve() could throw, so export now 8701 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8702 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8703 cmp r0, #0 @ success? 8704 bne .LOP_SGET_SHORT_finish @ yes, finish 8705 b common_exceptionThrown @ no, handle exception 8706 8707/* continuation for OP_SPUT */ 8708 8709 /* 8710 * Continuation if the field has not yet been resolved. 8711 * r1: BBBB field ref 8712 */ 8713.LOP_SPUT_resolve: 8714 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8715 EXPORT_PC() @ resolve() could throw, so export now 8716 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8717 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8718 cmp r0, #0 @ success? 8719 bne .LOP_SPUT_finish @ yes, finish 8720 b common_exceptionThrown @ no, handle exception 8721 8722/* continuation for OP_SPUT_WIDE */ 8723 8724 /* 8725 * Continuation if the field has not yet been resolved. 8726 * r1: BBBB field ref 8727 * r9: &fp[AA] 8728 * 8729 * Returns StaticField pointer in r2. 8730 */ 8731.LOP_SPUT_WIDE_resolve: 8732 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8733 EXPORT_PC() @ resolve() could throw, so export now 8734 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8735 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8736 cmp r0, #0 @ success? 
8737 mov r2, r0 @ copy to r2 8738 bne .LOP_SPUT_WIDE_finish @ yes, finish 8739 b common_exceptionThrown @ no, handle exception 8740 8741/* continuation for OP_SPUT_OBJECT */ 8742.LOP_SPUT_OBJECT_finish: @ field ptr in r0 8743 mov r2, rINST, lsr #8 @ r2<- AA 8744 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8745 GET_VREG(r1, r2) @ r1<- fp[AA] 8746 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8747 GET_INST_OPCODE(ip) @ extract opcode from rINST 8748 add r0, #offStaticField_value @ r0<- pointer to store target 8749 @ no-op @ releasing store 8750 str r1, [r0] @ field<- vAA 8751 cmp r1, #0 @ stored a null object? 8752 strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ mark card if not 8753 GOTO_OPCODE(ip) @ jump to next instruction 8754 8755/* continuation for OP_SPUT_BOOLEAN */ 8756 8757 /* 8758 * Continuation if the field has not yet been resolved. 8759 * r1: BBBB field ref 8760 */ 8761.LOP_SPUT_BOOLEAN_resolve: 8762 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8763 EXPORT_PC() @ resolve() could throw, so export now 8764 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8765 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8766 cmp r0, #0 @ success? 8767 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 8768 b common_exceptionThrown @ no, handle exception 8769 8770/* continuation for OP_SPUT_BYTE */ 8771 8772 /* 8773 * Continuation if the field has not yet been resolved. 8774 * r1: BBBB field ref 8775 */ 8776.LOP_SPUT_BYTE_resolve: 8777 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8778 EXPORT_PC() @ resolve() could throw, so export now 8779 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8780 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8781 cmp r0, #0 @ success? 8782 bne .LOP_SPUT_BYTE_finish @ yes, finish 8783 b common_exceptionThrown @ no, handle exception 8784 8785/* continuation for OP_SPUT_CHAR */ 8786 8787 /* 8788 * Continuation if the field has not yet been resolved. 
8789 * r1: BBBB field ref 8790 */ 8791.LOP_SPUT_CHAR_resolve: 8792 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8793 EXPORT_PC() @ resolve() could throw, so export now 8794 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8795 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8796 cmp r0, #0 @ success? 8797 bne .LOP_SPUT_CHAR_finish @ yes, finish 8798 b common_exceptionThrown @ no, handle exception 8799 8800/* continuation for OP_SPUT_SHORT */ 8801 8802 /* 8803 * Continuation if the field has not yet been resolved. 8804 * r1: BBBB field ref 8805 */ 8806.LOP_SPUT_SHORT_resolve: 8807 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8808 EXPORT_PC() @ resolve() could throw, so export now 8809 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8810 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8811 cmp r0, #0 @ success? 8812 bne .LOP_SPUT_SHORT_finish @ yes, finish 8813 b common_exceptionThrown @ no, handle exception 8814 8815/* continuation for OP_INVOKE_VIRTUAL */ 8816 8817 /* 8818 * At this point: 8819 * r0 = resolved base method 8820 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 8821 */ 8822.LOP_INVOKE_VIRTUAL_continue: 8823 GET_VREG(r1, r10) @ r1<- "this" ptr 8824 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8825 cmp r1, #0 @ is "this" null? 
8826 beq common_errNullObject @ null "this", throw exception 8827 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 8828 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 8829 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 8830 bl common_invokeMethodNoRange @ continue on 8831 8832/* continuation for OP_INVOKE_SUPER */ 8833 8834 /* 8835 * At this point: 8836 * r0 = resolved base method 8837 * r9 = method->clazz 8838 */ 8839.LOP_INVOKE_SUPER_continue: 8840 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 8841 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8842 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 8843 EXPORT_PC() @ must export for invoke 8844 cmp r2, r3 @ compare (methodIndex, vtableCount) 8845 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass 8846 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 8847 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 8848 bl common_invokeMethodNoRange @ continue on 8849 8850.LOP_INVOKE_SUPER_resolve: 8851 mov r0, r9 @ r0<- method->clazz 8852 mov r2, #METHOD_VIRTUAL @ resolver method type 8853 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8854 cmp r0, #0 @ got null? 8855 bne .LOP_INVOKE_SUPER_continue @ no, continue 8856 b common_exceptionThrown @ yes, handle exception 8857 8858 /* 8859 * Throw a NoSuchMethodError with the method name as the message. 
 * r0 = resolved base method
 */
.LOP_INVOKE_SUPER_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod

/* continuation for OP_INVOKE_DIRECT */

    /*
     * Slow path: the direct-method cache missed, so resolve the reference
     * and re-check "this" before invoking.
     *
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_resolve:
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_finish   @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* continuation for OP_INVOKE_VIRTUAL_RANGE */

    /*
     * Look up the concrete target in the receiver's vtable and invoke it.
     *
     * At this point:
     *  r0 = resolved base method
     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
     */
.LOP_INVOKE_VIRTUAL_RANGE_continue:
    GET_VREG(r1, r10)                   @ r1<- "this" ptr
    ldrh    r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
    cmp     r1, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
    ldr     r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange    @ continue on

/* continuation for OP_INVOKE_SUPER_RANGE */

    /*
     * Invoke the superclass implementation; bounds-check methodIndex
     * against the superclass vtable first.
     *
     * At this point:
     *  r0 = resolved base method
     *  r9 = method->clazz
     */
.LOP_INVOKE_SUPER_RANGE_continue:
    ldr     r1, [r9, #offClassObject_super] @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange    @ continue on

.LOP_INVOKE_SUPER_RANGE_resolve:
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_RANGE_continue @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     * r0 = resolved base method
     */
.LOP_INVOKE_SUPER_RANGE_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod

/* continuation for OP_INVOKE_DIRECT_RANGE */

    /*
     * Slow path: resolve the direct method, then reload "this".
     *
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_RANGE_resolve:
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* continuation for OP_FLOAT_TO_LONG */
/*
 * Convert the float in r0 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us,
 * so compare against maxlong/minlong and NaN first, then convert.
 */
f2l_doconv:
    stmfd   sp!, {r4, lr}
    mov     r1, #0x5f000000             @ (float)maxlong
    mov     r4, r0                      @ preserve arg across helper calls
    bl      __aeabi_fcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
    mvnne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, #0xdf000000             @ (float)minlong
    bl      __aeabi_fcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (8000000000000000)
    movne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, r4
    bl      __aeabi_fcmpeq              @ is arg == self?  (false only for NaN)
    cmp     r0, #0                      @ zero == no
    moveq   r1, #0                      @ return zero for NaN
    ldmeqfd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    bl      __aeabi_f2lz                @ convert float to long
    ldmfd   sp!, {r4, pc}

/* continuation for OP_DOUBLE_TO_LONG */
/*
 * Convert the double in r0/r1 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us,
 * so compare against maxlong/minlong and NaN first, then convert.
 */
d2l_doconv:
    stmfd   sp!, {r4, r5, lr}           @ save regs
    mov     r3, #0x43000000             @ maxlong, as a double (high word)
    add     r3, #0x00e00000             @  0x43e00000
    mov     r2, #0                      @ maxlong, as a double (low word)
    sub     sp, sp, #4                  @ align for EABI
    mov     r4, r0                      @ save a copy of r0
    mov     r5, r1                      @  and r1
    bl      __aeabi_dcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
    mvnne   r1, #0x80000000
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    mov     r3, #0xc3000000             @ minlong, as a double (high word)
    add     r3, #0x00e00000             @  0xc3e00000
    mov     r2, #0                      @ minlong, as a double (low word)
    bl      __aeabi_dcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (8000000000000000)
    movne   r1, #0x80000000
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    mov     r2, r4                      @ compare against self
    mov     r3, r5
    bl      __aeabi_dcmpeq              @ is arg == self?  (false only for NaN)
    cmp     r0, #0                      @ zero == no
    moveq   r1, #0                      @ return zero for NaN
    beq     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    bl      __aeabi_d2lz                @ convert double to long

1:
    add     sp, sp, #4                  @ undo EABI alignment pad
    ldmfd   sp!, {r4, r5, pc}

/* continuation for OP_MUL_LONG */

.LOP_MUL_LONG_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SHL_LONG */

.LOP_SHL_LONG_finish:
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SHR_LONG */

.LOP_SHR_LONG_finish:
    mov     r1, r1, asr r2              @ r1<- r1 >> r2  (arithmetic)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_USHR_LONG */

.LOP_USHR_LONG_finish:
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2  (logical)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SHL_LONG_2ADDR */

.LOP_SHL_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SHR_LONG_2ADDR */

.LOP_SHR_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_USHR_LONG_2ADDR */

.LOP_USHR_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_VOLATILE */

    /*
     * Volatile 32-bit instance-field load: barrier after the load.
     *
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_VOLATILE_finish:
    @bl      common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    SMP_DMB                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_VOLATILE */

    /*
     * Volatile 32-bit instance-field store: barrier before the store.
     *
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_VOLATILE_finish:
    @bl      common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SGET_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_VOLATILE_finish   @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_VOLATILE_finish   @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_IGET_OBJECT_VOLATILE */

    /*
     * Volatile object-field load (same code shape as OP_IGET_VOLATILE).
     *
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_OBJECT_VOLATILE_finish:
    @bl      common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    SMP_DMB                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_WIDE_VOLATILE */

    /*
     * Volatile 64-bit field load; uses the quasi-atomic helper so the
     * two words are read as a unit on ARMv5 (no ldrexd available).
     *
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_WIDE_VOLATILE_finish:
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    .if     1
    add     r0, r9, r3                  @ r0<- address of field
    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
    .else
    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
    .endif
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_WIDE_VOLATILE */

    /*
     * Volatile 64-bit field store via the quasi-atomic swap helper.
     *
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_WIDE_VOLATILE_finish:
    mov     r2, rINST, lsr #8           @ r2<- A+
    cmp     r9, #0                      @ check object for null
    and     r2, r2, #15                 @ r2<- A
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[A]
    GET_INST_OPCODE(r10)                @ extract opcode from rINST (into r10:
                                        @  ip is not preserved across the bl)
    .if     1
    add     r2, r9, r3                  @ r2<- target address
    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
    .else
    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
    .endif
    GOTO_OPCODE(r10)                    @ jump to next instruction

/* continuation for OP_SGET_WIDE_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     *
     * Returns StaticField pointer in r0.
     */
.LOP_SGET_WIDE_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_WIDE_VOLATILE_finish @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_WIDE_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     *  r9: &fp[AA]
     *
     * Returns StaticField pointer in r2.
     */
.LOP_SPUT_WIDE_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    mov     r2, r0                      @ copy to r2 (finish expects it there)
    bne     .LOP_SPUT_WIDE_VOLATILE_finish @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_EXECUTE_INLINE */

    /*
     * Extract args, call function.  The computed goto below jumps into
     * the AND/LDR pair for the highest-numbered arg, falling through the
     * rest, so exactly r0 args are loaded.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     *
     * Other ideas:
     * - Use a jump table from the main piece to jump directly into the
     *   AND/LDR pairs.  Costs a data load, saves a branch.
     * - Have five separate pieces that do the loading, so we can work the
     *   interleave a little better.  Increases code size.
     */
.LOP_EXECUTE_INLINE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- FEDC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  and     ip, r9, #0xf000             @ isolate F
    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vF (shift right 12, left 2)
3:  and     ip, r9, #0x0f00             @ isolate E
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vE
2:  and     ip, r9, #0x00f0             @ isolate D
    ldr     r1, [rFP, ip, lsr #2]       @ r1<- vD
1:  and     ip, r9, #0x000f             @ isolate C
    ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
0:
    ldr     r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_table:
    .word   gDvmInlineOpsTable

/* continuation for OP_EXECUTE_INLINE_RANGE */

    /*
     * Extract args, call function.  Same computed-goto scheme as above,
     * but args come from a contiguous register range starting at vCCCC.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     */
.LOP_EXECUTE_INLINE_RANGE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- CCCC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  add     ip, r9, #3                  @ base+3
    GET_VREG(r3, ip)                    @ r3<- vBase[3]
3:  add     ip, r9, #2                  @ base+2
    GET_VREG(r2, ip)                    @ r2<- vBase[2]
2:  add     ip, r9, #1                  @ base+1
    GET_VREG(r1, ip)                    @ r1<- vBase[1]
1:  add     ip, r9, #0                  @ (nop)
    GET_VREG(r0, ip)                    @ r0<- vBase[0]
0:
    ldr     r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_RANGE_table:
    .word   gDvmInlineOpsTable

/* continuation for OP_IPUT_OBJECT_VOLATILE */

    /*
     * Volatile object-field store, with card-table write barrier for GC.
     *
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_OBJECT_VOLATILE_finish:
    @bl      common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    ldr     r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r9, r3                      @ r9<- direct ptr to target location
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store
    str     r0, [r9]                    @ obj.field (8/16/32 bits)<- r0
    cmp     r0, #0                      @ stored a null reference?
    strneb  r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SGET_OBJECT_VOLATILE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_OBJECT_VOLATILE_resolve:
    ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_OBJECT_VOLATILE_finish @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SPUT_OBJECT_VOLATILE */
    /*
     * Volatile static object store, with card-table write barrier for GC.
     */
.LOP_SPUT_OBJECT_VOLATILE_finish:   @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    ldr     r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    add     r0, #offStaticField_value   @ r0<- pointer to store target
    SMP_DMB                             @ releasing store
    str     r1, [r0]                    @ field<- vAA
    cmp     r1, #0                      @ stored a null object?
    strneb  r2, [r2, r0, lsr #GC_CARD_SHIFT] @ mark card if not
    GOTO_OPCODE(ip)                     @ jump to next instruction

    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
    .global dvmAsmSisterEnd
dvmAsmSisterEnd:

/* File: armv5te/footer.S */

/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */




    .text
    .align  2

#if defined(WITH_JIT)
#if defined(WITH_SELF_VERIFICATION)
    /*
     * Self-verification builds: every JIT->interp transition funnels through
     * jitSVShadowRunEnd so shadow state can be compared/restored.
     */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResumeNPC]
    str    r1,[rGLUE,#offGlue_jitResumeDPC]
    mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNoChainNoProfile
dvmJitToInterpNoChainNoProfile:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSNoProfile            @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC (word after thumb BLX)
    mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpBackwardBranch
dvmJitToInterpBackwardBranch:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSNormal               @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
#else
/*
 * Return from the translation cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction. We have to skip
 * the code cache lookup otherwise it is possible to indefinitely bounce
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    rPC, r0
#if defined(WITH_JIT_TUNING)
    mov    r0,lr
    bl     dvmBumpPunt;
#endif
    EXPORT_PC()
    mov    r0, #0
    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *    r0 <= PC
 *    r1 <= PC of resume instruction
 *    lr <= resume point in translation
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResumeNPC]
    str    r1,[rGLUE,#offGlue_jitResumeDPC]
    mov    r1,#kInterpEntryInstr
    @ enum is 4 byte in aapcs-EABI
    str    r1, [rGLUE, #offGlue_entryPoint]
    mov    rPC,r0
    EXPORT_PC()

    adrl   rIBASE, dvmAsmInstructionStart
    mov    r2,#kJitSingleStep           @ Ask for single step and then revert
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r1,#1                        @ set changeInterp to bail to debug interp
    b      common_gotoBail

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used for callees.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                        @ !0 means translation exists
    bxne   r0                           @ continue native execution if so
    b      2f                           @ branch over to use the interpreter

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.
 */
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    rPC,[lr, #-1]                @ get our target PC (word after thumb BLX)
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5                 @ save start of chain branch
    add    rINST, #-4                   @ .. which is 9 bytes back
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    2f                           @ no translation: fall to interpreter
    mov    r1,rINST
    bl     dvmJitChain                  @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                        @ successful chain?
    bxne   r0                           @ continue native execution
    b      toInterpreter                @ didn't chain - resume with interpreter

/* No translation, so request one if profiling isn't disabled */
2:
    adrl   rIBASE, dvmAsmInstructionStart
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp    r0, #0                       @ profiling enabled (table non-null)?
    movne  r2,#kJitTSelectRequestHot    @ ask for trace selection
    bne    common_selectTrace
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return from the translation cache to the interpreter.
 * The return was done with a BLX from thumb mode, and
 * the following 32-bit word contains the target rPC value.
 * Note that lr (r14) will have its low-order bit set to denote
 * its thumb-mode origin.
 *
 * We'll need to stash our lr origin away, recover the new
 * target and then check to see if there is a translation available
 * for our new target.  If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    rPC,[lr, #-1]                @ get our target PC
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5                 @ save start of chain branch
    add    rINST,#-4                    @ .. which is 9 bytes back
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNormal
#endif
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    toInterpreter                @ go if not, otherwise do chain
    mov    r1,rINST
    bl     dvmJitChain                  @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                        @ successful chain?
    bxne   r0                           @ continue native execution
    b      toInterpreter                @ didn't chain - resume with interpreter

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChainNoProfile
dvmJitToInterpNoChainNoProfile:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0
    bxne   r0                           @ continue native execution if so
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0
    bxne   r0                           @ continue native execution if so
#endif

/*
 * No translation, restore interpreter regs and start interpreting.
 * rGLUE & rFP were preserved in the translated code, and rPC has
 * already been restored by the time we get here.  We'll need to set
 * up rIBASE & rINST, and load the address of the JitTable into r0.
 */
toInterpreter:
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_JIT_PROF_TABLE(r0)
    @ NOTE: intended fallthrough

/*
 * Common code to update potential trace start counter, and initiate
 * a trace-build if appropriate. On entry, rPC should point to the
 * next instruction to execute, and rINST should be already loaded with
 * the next opcode word, and r0 holds a pointer to the jit profile
 * table (pJitProfTable).
 */
common_testUpdateProfile:
    cmp     r0,#0
    GET_INST_OPCODE(ip)
    GOTO_OPCODE_IFEQ(ip)       @ if not profiling, fallthrough otherwise */

common_updateProfile:
    eor     r3,rPC,rPC,lsr #12 @ cheap, but fast hash function
    lsl     r3,r3,#(32 - JIT_PROF_SIZE_LOG_2)          @ shift out excess bits
    ldrb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter
    GET_INST_OPCODE(ip)
    subs    r1,r1,#1           @ decrement counter
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it
    GOTO_OPCODE_IFNE(ip)       @ if not threshold, fallthrough otherwise */

/*
 * Here, we switch to the debug interpreter to request
 * trace selection.  First, though, check to see if there
 * is already a native translation in place (and, if so,
 * jump to it now).
 */
    GET_JIT_THRESHOLD(r1)
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @  in case target is HANDLER_INTERPRET
    cmp     r0,#0
#if !defined(WITH_SELF_VERIFICATION)
    bxne    r0                          @ jump to the translation
    mov     r2,#kJitTSelectRequest      @ ask for trace selection
    @ fall-through to common_selectTrace
#else
    moveq   r2,#kJitTSelectRequest      @ ask for trace selection
    beq     common_selectTrace
    /*
     * At this point, we have a target translation.  However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    mov     r10, r0                     @ save target
    bl      dvmCompilerGetInterpretTemplate
    cmp     r0, r10                     @ special case?
    bne     jitSVShadowRunStart         @ set up self verification shadow space
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
    /* no return */
#endif

/*
 * On entry:
 *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
 */
common_selectTrace:
    str     r2,[rGLUE,#offGlue_jitState]
    mov     r2,#kInterpEntryInstr       @ normal entry reason
    str     r2,[rGLUE,#offGlue_entryPoint]
    mov     r1,#1                       @ set changeInterp
    b       common_gotoBail

#if defined(WITH_SELF_VERIFICATION)
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
 * On entry:
 *    rPC, rFP, rGLUE: the values that they should contain
 *    r10: the address of the target translation.
9742 */ 9743jitSVShadowRunStart: 9744 mov r0,rPC @ r0<- program counter 9745 mov r1,rFP @ r1<- frame pointer 9746 mov r2,rGLUE @ r2<- InterpState pointer 9747 mov r3,r10 @ r3<- target translation 9748 bl dvmSelfVerificationSaveState @ save registers to shadow space 9749 ldr rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space 9750 add rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space 9751 bx r10 @ jump to the translation 9752 9753/* 9754 * Restore PC, registers, and interpState to original values 9755 * before jumping back to the interpreter. 9756 */ 9757jitSVShadowRunEnd: 9758 mov r1,rFP @ pass ending fp 9759 bl dvmSelfVerificationRestoreState @ restore pc and fp values 9760 ldr rPC,[r0,#offShadowSpace_startPC] @ restore PC 9761 ldr rFP,[r0,#offShadowSpace_fp] @ restore FP 9762 ldr rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState 9763 ldr r1,[r0,#offShadowSpace_svState] @ get self verification state 9764 cmp r1,#0 @ check for punt condition 9765 beq 1f 9766 mov r2,#kJitSelfVerification @ ask for self verification 9767 str r2,[rGLUE,#offGlue_jitState] 9768 mov r2,#kInterpEntryInstr @ normal entry reason 9769 str r2,[rGLUE,#offGlue_entryPoint] 9770 mov r1,#1 @ set changeInterp 9771 b common_gotoBail 9772 97731: @ exit to interpreter without check 9774 EXPORT_PC() 9775 adrl rIBASE, dvmAsmInstructionStart 9776 FETCH_INST() 9777 GET_INST_OPCODE(ip) 9778 GOTO_OPCODE(ip) 9779#endif 9780 9781#endif 9782 9783/* 9784 * Common code when a backward branch is taken. 9785 * 9786 * TODO: we could avoid a branch by just setting r0 and falling through 9787 * into the common_periodicChecks code, and having a test on r0 at the 9788 * end determine if we should return to the caller or update & branch to 9789 * the next instr. 
9790 * 9791 * On entry: 9792 * r9 is PC adjustment *in bytes* 9793 */ 9794common_backwardBranch: 9795 mov r0, #kInterpEntryInstr 9796 bl common_periodicChecks 9797#if defined(WITH_JIT) 9798 GET_JIT_PROF_TABLE(r0) 9799 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9800 cmp r0,#0 9801 bne common_updateProfile 9802 GET_INST_OPCODE(ip) 9803 GOTO_OPCODE(ip) 9804#else 9805 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9806 GET_INST_OPCODE(ip) @ extract opcode from rINST 9807 GOTO_OPCODE(ip) @ jump to next instruction 9808#endif 9809 9810 9811/* 9812 * Need to see if the thread needs to be suspended or debugger/profiler 9813 * activity has begun. If so, we suspend the thread or side-exit to 9814 * the debug interpreter as appropriate. 9815 * 9816 * The common case is no activity on any of these, so we want to figure 9817 * that out quickly. If something is up, we can then sort out what. 9818 * 9819 * We want to be fast if the VM was built without debugger or profiler 9820 * support, but we also need to recognize that the system is usually 9821 * shipped with both of these enabled. 9822 * 9823 * TODO: reduce this so we're just checking a single location. 9824 * 9825 * On entry: 9826 * r0 is reentry type, e.g. kInterpEntryInstr (for debugger/profiling) 9827 * r9 is trampoline PC adjustment *in bytes* 9828 */ 9829common_periodicChecks: 9830 ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount 9831 9832 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 9833 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 9834 9835 ldr ip, [r3] @ ip<- suspendCount (int) 9836 9837 cmp r1, #0 @ debugger enabled? 9838 ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean) 9839 ldr r2, [r2] @ r2<- activeProfilers (int) 9840 orrnes ip, ip, r1 @ ip<- suspendCount | debuggerActive 9841 /* 9842 * Don't switch the interpreter in the libdvm_traceview build even if the 9843 * profiler is active. 
     * The code here is opted for less intrusion instead of performance.
     * That is, *pActiveProfilers is still loaded into r2 even though it is not
     * used when WITH_INLINE_PROFILING is defined.
     */
#if !defined(WITH_INLINE_PROFILING)
    orrs    ip, ip, r2          @ ip<- suspend|debugger|profiler; set Z
#endif


    bxeq    lr                  @ all zero, return

    /*
     * One or more interesting events have happened.  Figure out what.
     *
     * If debugging or profiling are compiled in, we need to disambiguate.
     *
     * r0 still holds the reentry type.
     */
    ldr     ip, [r3]            @ ip<- suspendCount (int)
    cmp     ip, #0              @ want suspend?
    beq     1f                  @ no, must be debugger/profiler

    stmfd   sp!, {r0, lr}       @ preserve r0 and lr
#if defined(WITH_JIT)
    /*
     * Refresh the Jit's cached copy of profile table pointer.  This pointer
     * doubles as the Jit's on/off switch.
     */
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r3, [r3]            @ r3 <- pJitProfTable
    EXPORT_PC()                 @ need for precise GC
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
#else
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                 @ need for precise GC
#endif
    bl      dvmCheckSuspendPending @ do full check, suspend if necessary
    ldmfd   sp!, {r0, lr}       @ restore r0 and lr

    /*
     * Reload the debugger/profiler enable flags.  We're checking to see
     * if either of these got set while we were suspended.
     *
     * If WITH_INLINE_PROFILING is configured, don't check whether the profiler
     * is enabled or not as the profiling will be done inline.
     */
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
    cmp     r1, #0              @ debugger enabled?
    ldrneb  r1, [r1]            @ yes, r1<- debuggerActive (boolean)

#if !defined(WITH_INLINE_PROFILING)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
    ldr     r2, [r2]            @ r2<- activeProfilers (int)
    orrs    r1, r1, r2          @ set Z if neither is active
#else
    cmp     r1, #0              @ only consult the debuggerActive flag
#endif

    beq     2f                  @ neither set: nothing to do, just return

    @ Label 1 is reached both by the beq above (no suspend requested) and by
    @ falling through after a suspend when debugger/profiler flags are set.
1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
    add     rPC, rPC, r9        @ update rPC
    mov     r1, #1              @ "want switch" = true
    b       common_gotoBail     @ side exit

2:
    bx      lr                  @ nothing to do, return


/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 *
 * State registers will be saved to the "glue" area before bailing.
 *
 * On entry:
 *   r1 is "bool changeInterp", indicating if we want to switch to the
 *     other interpreter or just bail all the way out
 */
common_gotoBail:
    SAVE_PC_FP_TO_GLUE()        @ export state to "glue"
    mov     r0, rGLUE           @ r0<- glue ptr
    b       dvmMterpStdBail     @ call(glue, changeInterp)

    @ Dead alternative implementation, kept for reference:
    @add     r1, r1, #1         @ using (boolean+1)
    @add     r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf
    @bl      _longjmp           @ does not return
    @bl      common_abort


/*
 * Common code for method invocation with range.
9937 * 9938 * On entry: 9939 * r0 is "Method* methodToCall", the method we're trying to call 9940 */ 9941common_invokeMethodRange: 9942.LinvokeNewRange: 9943 @ prepare to copy args to "outs" area of current frame 9944 movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero 9945 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9946 beq .LinvokeArgsDone @ if no args, skip the rest 9947 FETCH(r1, 2) @ r1<- CCCC 9948 9949 @ r0=methodToCall, r1=CCCC, r2=count, r10=outs 9950 @ (very few methods have > 10 args; could unroll for common cases) 9951 add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC] 9952 sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args 9953 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 99541: ldr r1, [r3], #4 @ val = *fp++ 9955 subs r2, r2, #1 @ count-- 9956 str r1, [r10], #4 @ *outs++ = val 9957 bne 1b @ ...while count != 0 9958 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9959 b .LinvokeArgsDone 9960 9961/* 9962 * Common code for method invocation without range. 
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12  @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)  @ r10<- stack save area
    FETCH(r1, 2)                @ r1<- GFED (load here to hide latency)
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
    beq     .LinvokeArgsDone

    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
.LinvokeNonRange:
    @ Computed goto: jump into the table below at case (5 - argCount).
    @ Every numbered case MUST be exactly 4 instructions (16 bytes) long --
    @ the "mov r0, r0" nops exist solely to pad each case to that size.
    rsb     r2, r2, #5          @ r2<- 5-r2
    add     pc, pc, r2, lsl #4  @ computed goto, 4 instrs each
    bl      common_abort        @ (skipped due to ARM prefetch)
5:  and     ip, rINST, #0x0f00  @ isolate A
    ldr     r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2)
    mov     r0, r0              @ nop
    str     r2, [r10, #-4]!     @ *--outs = vA
4:  and     ip, r1, #0xf000     @ isolate G
    ldr     r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2)
    mov     r0, r0              @ nop
    str     r2, [r10, #-4]!     @ *--outs = vG
3:  and     ip, r1, #0x0f00     @ isolate F
    ldr     r2, [rFP, ip, lsr #6] @ r2<- vF
    mov     r0, r0              @ nop
    str     r2, [r10, #-4]!     @ *--outs = vF
2:  and     ip, r1, #0x00f0     @ isolate E
    ldr     r2, [rFP, ip, lsr #2] @ r2<- vE
    mov     r0, r0              @ nop
    str     r2, [r10, #-4]!     @ *--outs = vE
1:  and     ip, r1, #0x000f     @ isolate D
    ldr     r2, [rFP, ip, lsl #2] @ r2<- vD
    mov     r0, r0              @ nop
    str     r2, [r10, #-4]!     @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]     @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]  @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)   @ r1<- stack save area
    sub     r1, r1, r9, lsl #2  @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)   @ r10<- newSaveArea
@    bl      common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9              @ bottom < interpStackEnd?
    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
    blo     .LstackOverflow     @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)   @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
#if defined(WITH_INLINE_PROFILING)
    stmfd   sp!, {r0-r3}        @ preserve r0-r3
    mov     r1, r6
    @ r0=methodToCall, r1=rGlue
    bl      dvmFastMethodTraceEnter
    ldmfd   sp!, {r0-r3}        @ restore r0-r3
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE
    bne     .LinvokeNative

    /* Disabled frame-dump debugging aid:
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                        @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                         @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    cmp     r0,#0                           @ JIT profiling active?
    bne     common_updateProfile
    GOTO_OPCODE(ip)                         @ jump to next instruction
#else
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    GOTO_OPCODE(ip)                         @ jump to next instruction
#endif

.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
    mov     r9, r3                          @ r9<- glue->self (preserve)

    mov     r2, r0                          @ r2<- methodToCall
    mov     r0, r1                          @ r0<- newFp (points to args)
    add     r1, rGLUE, #offGlue_retval      @ r1<- &retval

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type   dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

#if defined(WITH_INLINE_PROFILING)
    @ r2=JNIMethod, r6=rGLUE
    stmfd   sp!, {r2,r6}
#endif

    @mov     lr, pc                      @ set return addr
    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    LDR_PC_LR "[r2, #offMethod_nativeFunc]"

#if defined(WITH_INLINE_PROFILING)
    @ r0=JNIMethod, r1=rGLUE
    ldmfd   sp!, {r0-r1}
    bl      dvmFastNativeMethodTraceExit
#endif

#if defined(WITH_JIT)
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
    ldr     r1, [r9, #offThread_exception]  @ check for exception
#if defined(WITH_JIT)
    ldr     r3, [r3]                        @ r3 <- gDvmJit.pProfTable
#endif
    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
    cmp     r1, #0                          @ null?
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
#if defined(WITH_JIT)
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
#endif
    bne     common_exceptionThrown          @ no, handle exception

    FETCH_ADVANCE_INST(3)                   @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                     @ extract opcode from rINST
    GOTO_OPCODE(ip)                         @ jump to next instruction

.LstackOverflow:    @ r0=methodToCall
    mov     r1, r0                          @ r1<- methodToCall
    ldr     r0, [rGLUE, #offGlue_self]      @ r0<- self
    bl      dvmHandleStackOverflow
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
#endif


    /*
     * Common code for method invocation, calling through "glue code".
     * (Disabled with ".if 0"; kept for reference.)
     *
     * TODO: now that we have range and non-range invoke handlers, this
     * needs to be split into two.  Maybe just create entry points
     * that set r9 and jump here?
     *
     * On entry:
     *  r0 is "Method* methodToCall", the method we're trying to call
     *  r9 is "bool methodCallRange", indicating if this is a /range variant
     */
     .if    0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rGLUE                   @ A0<- glue
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
    .endif



/*
 * Common code for handling a return instruction.
 *
 * This does not return.
10185 */ 10186common_returnFromMethod: 10187.LreturnNew: 10188 mov r0, #kInterpEntryReturn 10189 mov r9, #0 10190 bl common_periodicChecks 10191 10192#if defined(WITH_INLINE_PROFILING) 10193 stmfd sp!, {r0-r3} @ preserve r0-r3 10194 mov r0, r6 10195 @ r0=rGlue 10196 bl dvmFastJavaMethodTraceExit 10197 ldmfd sp!, {r0-r3} @ restore r0-r3 10198#endif 10199 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) 10200 ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame 10201 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc 10202 ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)] 10203 @ r2<- method we're returning to 10204 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10205 cmp r2, #0 @ is this a break frame? 10206 ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz 10207 mov r1, #0 @ "want switch" = false 10208 beq common_gotoBail @ break frame, bail out completely 10209 10210 PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST 10211 str r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method 10212 ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex 10213 str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp 10214#if defined(WITH_JIT) 10215 ldr r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr 10216 mov rPC, r9 @ publish new rPC 10217 str r1, [rGLUE, #offGlue_methodClassDex] 10218 str r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land 10219 cmp r10, #0 @ caller is compiled code 10220 blxne r10 10221 GET_INST_OPCODE(ip) @ extract opcode from rINST 10222 GOTO_OPCODE(ip) @ jump to next instruction 10223#else 10224 GET_INST_OPCODE(ip) @ extract opcode from rINST 10225 mov rPC, r9 @ publish new rPC 10226 str r1, [rGLUE, #offGlue_methodClassDex] 10227 GOTO_OPCODE(ip) @ jump to next instruction 10228#endif 10229 10230 /* 10231 * Return handling, calls through "glue code". 
     * (Disabled with ".if 0"; kept for reference.)
     */
    .if     0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()        @ export state
    mov     r0, rGLUE           @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif


/*
 * Somebody has thrown an exception.  Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
    .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow
    mov     r9, #0              @ no PC adjustment for periodic checks
    bl      common_periodicChecks

    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10             @ r1<- self
    mov     r0, r9              @ r0<- exception
    bl      dvmAddTrackedAlloc  @ don't let the exception be GCed
    mov     r3, #0              @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!     @ *--sp = fp
    mov     ip, sp              @ ip<- &fp
    mov     r3, #0              @ r3<- false
    str     ip, [sp, #-4]!      @ *--sp = &fp (5th arg, passed on the stack)
    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
    mov     r0, r10             @ r0<- self
    ldr     r1, [r1, #offMethod_insns] @ r1<- method->insns
    mov     r2, r9              @ r2<- exception
    sub     r1, rPC, r1         @ r1<- pc - method->insns
    mov     r1, r1, asr #1      @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock   @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0              @ did we overflow earlier?
    beq     1f                  @ no, skip ahead
    mov     rFP, r0             @ save relPc result in rFP
    mov     r0, r10             @ r0<- self
    mov     r1, r9              @ r1<- exception
    bl      dvmCleanupStackOverflow @ call(self)
    mov     r0, rFP             @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]       @ retrieve the updated rFP
    cmp     r0, #0              @ is catchRelPc < 0?
    add     sp, sp, #8          @ restore stack
    bmi     .LnotCaughtLocally

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)   @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9              @ r0<- exception
    mov     r1, r10             @ r1<- self
    bl      dvmReleaseTrackedAlloc  @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                @ load rINST from rPC
    GET_INST_OPCODE(ip)         @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)             @ jump to next instruction

.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0              @ did we overflow earlier?
    movne   r0, r10             @ if yes: r0<- self
    movne   r1, r9              @ if yes: r1<- exception
    blne    dvmCleanupStackOverflow @ if yes: call(self)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3              @ LOG_DEBUG
    bl      __android_log_print
#endif
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9              @ r0<- exception
    mov     r1, r10             @ r1<- self
    bl      dvmReleaseTrackedAlloc  @ release the exception
    mov     r1, #0              @ "want switch" = false
    b       common_gotoBail     @ bail out


    /*
     * Exception handling, calls through "glue code".
     * (Disabled with ".if 0"; kept for reference.)
     */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()        @ export state
    mov     r0, rGLUE           @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()      @ pull rPC and rFP out of glue
    FETCH_INST()                @ load rINST from rPC
    GET_INST_OPCODE(ip)         @ extract opcode from rINST
    GOTO_OPCODE(ip)             @ jump to next instruction

/*
 * Invalid array index.
10386 */ 10387common_errArrayIndex: 10388 EXPORT_PC() 10389 ldr r0, strArrayIndexException 10390 mov r1, #0 10391 bl dvmThrowException 10392 b common_exceptionThrown 10393 10394/* 10395 * Invalid array value. 10396 */ 10397common_errArrayStore: 10398 EXPORT_PC() 10399 ldr r0, strArrayStoreException 10400 mov r1, #0 10401 bl dvmThrowException 10402 b common_exceptionThrown 10403 10404/* 10405 * Integer divide or mod by zero. 10406 */ 10407common_errDivideByZero: 10408 EXPORT_PC() 10409 ldr r0, strArithmeticException 10410 ldr r1, strDivideByZero 10411 bl dvmThrowException 10412 b common_exceptionThrown 10413 10414/* 10415 * Attempt to allocate an array with a negative size. 10416 */ 10417common_errNegativeArraySize: 10418 EXPORT_PC() 10419 ldr r0, strNegativeArraySizeException 10420 mov r1, #0 10421 bl dvmThrowException 10422 b common_exceptionThrown 10423 10424/* 10425 * Invocation of a non-existent method. 10426 */ 10427common_errNoSuchMethod: 10428 EXPORT_PC() 10429 ldr r0, strNoSuchMethodError 10430 mov r1, #0 10431 bl dvmThrowException 10432 b common_exceptionThrown 10433 10434/* 10435 * We encountered a null object when we weren't expecting one. We 10436 * export the PC, throw a NullPointerException, and goto the exception 10437 * processing code. 10438 */ 10439common_errNullObject: 10440 EXPORT_PC() 10441 ldr r0, strNullPointerException 10442 mov r1, #0 10443 bl dvmThrowException 10444 b common_exceptionThrown 10445 10446/* 10447 * For debugging, cause an immediate fault. The source address will 10448 * be in lr (use a bl instruction to jump here). 10449 */ 10450common_abort: 10451 ldr pc, .LdeadFood 10452.LdeadFood: 10453 .word 0xdeadf00d 10454 10455/* 10456 * Spit out a "we were here", preserving all registers. (The attempt 10457 * to save ip won't work, but we need to save an even number of 10458 * registers for EABI 64-bit stack alignment.) 
 */
    .macro  SQUEAK num
common_squeak\num:
    @ All debug print helpers below save/restore {r0-r3, ip, lr} so they can
    @ be called from anywhere in the interpreter without disturbing state.
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strSqueak
    mov     r1, #\num
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endm

    SQUEAK  0
    SQUEAK  1
    SQUEAK  2
    SQUEAK  3
    SQUEAK  4
    SQUEAK  5

/*
 * Spit out the number in r0, preserving registers.
 */
common_printNum:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strSqueak
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print a newline, preserving registers.
 */
common_printNewline:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strNewline
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

    /*
     * Print the 32-bit quantity in r0 as a hex value, preserving registers.
     */
common_printHex:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strPrintHex
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print the 64-bit quantity in r0-r1, preserving registers.
 */
common_printLong:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r3, r1              @ shift the pair up to r2/r3 (printf varargs)
    mov     r2, r0
    ldr     r0, strPrintLong
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print full method info.  Pass the Method* in r0.  Preserves regs.
 */
common_printMethod:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpPrintMethod
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Call a C helper function that dumps regs and possibly some
 * additional info.  Requires the C function to be compiled in.
 * (Disabled with ".if 0".)
 */
    .if     0
common_dumpRegs:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpDumpArmRegs
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endif

#if 0
/*
 * Experiment on VFP mode.  (Entire block compiled out with #if 0.)
 *
 * uint32_t setFPSCR(uint32_t val, uint32_t mask)
 *
 * Updates the bits specified by "mask", setting them to the values in "val".
 */
setFPSCR:
    and     r0, r0, r1          @ make sure no stray bits are set
    fmrx    r2, fpscr           @ get VFP reg
    mvn     r1, r1              @ bit-invert mask
    and     r2, r2, r1          @ clear masked bits
    orr     r2, r2, r0          @ set specified bits
    fmxr    fpscr, r2           @ set VFP reg
    mov     r0, r2              @ return new value
    bx      lr

    .align  2
    .global dvmConfigureFP
    .type   dvmConfigureFP, %function
dvmConfigureFP:
    stmfd   sp!, {ip, lr}
    /* 0x03000000 sets DN/FZ */
    /* 0x00009f00 clears the six exception enable flags */
    bl      common_squeak0
    mov     r0,  #0x03000000    @ r0<- 0x03000000
    add     r1, r0, #0x9f00     @ r1<- 0x03009f00
    bl      setFPSCR
    ldmfd   sp!, {ip, pc}
#endif


/*
 * String references, must be close to the code that uses them.
10577 */ 10578 .align 2 10579strArithmeticException: 10580 .word .LstrArithmeticException 10581strArrayIndexException: 10582 .word .LstrArrayIndexException 10583strArrayStoreException: 10584 .word .LstrArrayStoreException 10585strDivideByZero: 10586 .word .LstrDivideByZero 10587strNegativeArraySizeException: 10588 .word .LstrNegativeArraySizeException 10589strNoSuchMethodError: 10590 .word .LstrNoSuchMethodError 10591strNullPointerException: 10592 .word .LstrNullPointerException 10593 10594strLogTag: 10595 .word .LstrLogTag 10596strExceptionNotCaughtLocally: 10597 .word .LstrExceptionNotCaughtLocally 10598 10599strNewline: 10600 .word .LstrNewline 10601strSqueak: 10602 .word .LstrSqueak 10603strPrintHex: 10604 .word .LstrPrintHex 10605strPrintLong: 10606 .word .LstrPrintLong 10607 10608/* 10609 * Zero-terminated ASCII string data. 10610 * 10611 * On ARM we have two choices: do like gcc does, and LDR from a .word 10612 * with the address, or use an ADR pseudo-op to get the address 10613 * directly. ADR saves 4 bytes and an indirection, but it's using a 10614 * PC-relative addressing mode and hence has a limited range, which 10615 * makes it not work well with mergeable string sections. 
10616 */ 10617 .section .rodata.str1.4,"aMS",%progbits,1 10618 10619.LstrBadEntryPoint: 10620 .asciz "Bad entry point %d\n" 10621.LstrArithmeticException: 10622 .asciz "Ljava/lang/ArithmeticException;" 10623.LstrArrayIndexException: 10624 .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;" 10625.LstrArrayStoreException: 10626 .asciz "Ljava/lang/ArrayStoreException;" 10627.LstrClassCastException: 10628 .asciz "Ljava/lang/ClassCastException;" 10629.LstrDivideByZero: 10630 .asciz "divide by zero" 10631.LstrFilledNewArrayNotImpl: 10632 .asciz "filled-new-array only implemented for objects and 'int'" 10633.LstrInternalError: 10634 .asciz "Ljava/lang/InternalError;" 10635.LstrInstantiationError: 10636 .asciz "Ljava/lang/InstantiationError;" 10637.LstrNegativeArraySizeException: 10638 .asciz "Ljava/lang/NegativeArraySizeException;" 10639.LstrNoSuchMethodError: 10640 .asciz "Ljava/lang/NoSuchMethodError;" 10641.LstrNullPointerException: 10642 .asciz "Ljava/lang/NullPointerException;" 10643 10644.LstrLogTag: 10645 .asciz "mterp" 10646.LstrExceptionNotCaughtLocally: 10647 .asciz "Exception %s from %s:%d not caught locally\n" 10648 10649.LstrNewline: 10650 .asciz "\n" 10651.LstrSqueak: 10652 .asciz "<%d>" 10653.LstrPrintHex: 10654 .asciz "<0x%x>" 10655.LstrPrintLong: 10656 .asciz "<%lld>" 10657 10658