InterpAsm-armv5te-vfp.S revision 978738d2cbf9d08fa78c65762eaac3351ab76b9a
1/* 2 * This file was generated automatically by gen-mterp.py for 'armv5te-vfp'. 3 * 4 * --> DO NOT EDIT <-- 5 */ 6 7/* File: armv5te/header.S */ 8/* 9 * Copyright (C) 2008 The Android Open Source Project 10 * 11 * Licensed under the Apache License, Version 2.0 (the "License"); 12 * you may not use this file except in compliance with the License. 13 * You may obtain a copy of the License at 14 * 15 * http://www.apache.org/licenses/LICENSE-2.0 16 * 17 * Unless required by applicable law or agreed to in writing, software 18 * distributed under the License is distributed on an "AS IS" BASIS, 19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 20 * See the License for the specific language governing permissions and 21 * limitations under the License. 22 */ 23/* 24 * ARMv5 definitions and declarations. 25 */ 26 27/* 28ARM EABI general notes: 29 30r0-r3 hold first 4 args to a method; they are not preserved across method calls 31r4-r8 are available for general use 32r9 is given special treatment in some situations, but not for us 33r10 (sl) seems to be generally available 34r11 (fp) is used by gcc (unless -fomit-frame-pointer is set) 35r12 (ip) is scratch -- not preserved across method calls 36r13 (sp) should be managed carefully in case a signal arrives 37r14 (lr) must be preserved 38r15 (pc) can be tinkered with directly 39 40r0 holds returns of <= 4 bytes 41r0-r1 hold returns of 8 bytes, low word in r0 42 43Callee must save/restore r4+ (except r12) if it modifies them. If VFP 44is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved, 45s0-s15 (d0-d7, q0-a3) do not need to be. 46 47Stack is "full descending". Only the arguments that don't fit in the first 4 48registers are placed on the stack. "sp" points at the first stacked argument 49(i.e. the 5th arg). 50 51VFP: single-precision results in s0, double-precision results in d0. 52 53In the EABI, "sp" must be 64-bit aligned on entry to a function, and any 5464-bit quantities (long long, double) must be 64-bit aligned. 55*/ 56 57/* 58Mterp and ARM notes: 59 60The following registers have fixed assignments: 61 62 reg nick purpose 63 r4 rPC interpreted program counter, used for fetching instructions 64 r5 rFP interpreted frame pointer, used for accessing locals and args 65 r6 rGLUE MterpGlue pointer 66 r7 rINST first 16-bit code unit of current instruction 67 r8 rIBASE interpreted instruction base pointer, used for computed goto 68 69Macros are provided for common operations. Each macro MUST emit only 70one instruction to make instruction-counting easier. They MUST NOT alter 71unspecified registers or condition codes. 72*/ 73 74/* single-purpose registers, given names for clarity */ 75#define rPC r4 76#define rFP r5 77#define rGLUE r6 78#define rINST r7 79#define rIBASE r8 80 81/* save/restore the PC and/or FP from the glue struct */ 82#define LOAD_PC_FROM_GLUE() ldr rPC, [rGLUE, #offGlue_pc] 83#define SAVE_PC_TO_GLUE() str rPC, [rGLUE, #offGlue_pc] 84#define LOAD_FP_FROM_GLUE() ldr rFP, [rGLUE, #offGlue_fp] 85#define SAVE_FP_TO_GLUE() str rFP, [rGLUE, #offGlue_fp] 86#define LOAD_PC_FP_FROM_GLUE() ldmia rGLUE, {rPC, rFP} 87#define SAVE_PC_FP_TO_GLUE() stmia rGLUE, {rPC, rFP} 88 89/* 90 * "export" the PC to the stack frame, f/b/o future exception objects. Must 91 * be done *before* something calls dvmThrowException. 92 * 93 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e. 94 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc) 95 * 96 * It's okay to do this more than once. 97 */ 98#define EXPORT_PC() \ 99 str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)] 100 101/* 102 * Given a frame pointer, find the stack save area. 103 * 104 * In C this is "((StackSaveArea*)(_fp) -1)". 105 */ 106#define SAVEAREA_FROM_FP(_reg, _fpreg) \ 107 sub _reg, _fpreg, #sizeofStackSaveArea 108 109/* 110 * Fetch the next instruction from rPC into rINST. Does not advance rPC. 111 */ 112#define FETCH_INST() ldrh rINST, [rPC] 113 114/* 115 * Fetch the next instruction from the specified offset. Advances rPC 116 * to point to the next instruction. "_count" is in 16-bit code units. 117 * 118 * Because of the limited size of immediate constants on ARM, this is only 119 * suitable for small forward movements (i.e. don't try to implement "goto" 120 * with this). 121 * 122 * This must come AFTER anything that can throw an exception, or the 123 * exception catch may miss. (This also implies that it must come after 124 * EXPORT_PC().) 125 */ 126#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]! 127 128/* 129 * The operation performed here is similar to FETCH_ADVANCE_INST, except the 130 * src and dest registers are parameterized (not hard-wired to rPC and rINST). 131 */ 132#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \ 133 ldrh _dreg, [_sreg, #(_count*2)]! 134 135/* 136 * Fetch the next instruction from an offset specified by _reg. Updates 137 * rPC to point to the next instruction. "_reg" must specify the distance 138 * in bytes, *not* 16-bit code units, and may be a signed value. 139 * 140 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the 141 * bits that hold the shift distance are used for the half/byte/sign flags. 142 * In some cases we can pre-double _reg for free, so we require a byte offset 143 * here. 144 */ 145#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]! 146 147/* 148 * Fetch a half-word code unit from an offset past the current PC. The 149 * "_count" value is in 16-bit code units. Does not advance rPC. 150 * 151 * The "_S" variant works the same but treats the value as signed. 152 */ 153#define FETCH(_reg, _count) ldrh _reg, [rPC, #(_count*2)] 154#define FETCH_S(_reg, _count) ldrsh _reg, [rPC, #(_count*2)] 155 156/* 157 * Fetch one byte from an offset past the current PC. Pass in the same 158 * "_count" as you would for FETCH, and an additional 0/1 indicating which 159 * byte of the halfword you want (lo/hi). 160 */ 161#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #(_count*2+_byte)] 162 163/* 164 * Put the instruction's opcode field into the specified register. 165 */ 166#define GET_INST_OPCODE(_reg) and _reg, rINST, #255 167 168/* 169 * Put the prefetched instruction's opcode field into the specified register. 170 */ 171#define GET_PREFETCHED_OPCODE(_oreg, _ireg) and _oreg, _ireg, #255 172 173/* 174 * Begin executing the opcode in _reg. Because this only jumps within the 175 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork. 176 */ 177#define GOTO_OPCODE(_reg) add pc, rIBASE, _reg, lsl #6 178#define GOTO_OPCODE_IFEQ(_reg) addeq pc, rIBASE, _reg, lsl #6 179#define GOTO_OPCODE_IFNE(_reg) addne pc, rIBASE, _reg, lsl #6 180 181/* 182 * Get/set the 32-bit value from a Dalvik register. 183 */ 184#define GET_VREG(_reg, _vreg) ldr _reg, [rFP, _vreg, lsl #2] 185#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2] 186 187#if defined(WITH_JIT) 188#define GET_JIT_PROF_TABLE(_reg) ldr _reg,[rGLUE,#offGlue_pJitProfTable] 189#define GET_JIT_THRESHOLD(_reg) ldr _reg,[rGLUE,#offGlue_jitThreshold] 190#endif 191 192/* 193 * Convert a virtual register index into an address. 194 */ 195#define VREG_INDEX_TO_ADDR(_reg, _vreg) \ 196 add _reg, rFP, _vreg, lsl #2 197 198/* 199 * This is a #include, not a %include, because we want the C pre-processor 200 * to expand the macros into assembler assignment statements. 201 */ 202#include "../common/asm-constants.h" 203 204#if defined(WITH_JIT) 205#include "../common/jit-config.h" 206#endif 207 208/* File: armv5te/platform.S */ 209/* 210 * =========================================================================== 211 * CPU-version-specific defines 212 * =========================================================================== 213 */ 214 215/* 216 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5. Essentially a 217 * one-way branch. 218 * 219 * May modify IP. Does not modify LR. 220 */ 221.macro LDR_PC source 222 ldr pc, \source 223.endm 224 225/* 226 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5. 227 * Jump to subroutine. 228 * 229 * May modify IP and LR. 230 */ 231.macro LDR_PC_LR source 232 mov lr, pc 233 ldr pc, \source 234.endm 235 236/* 237 * Macro for "LDMFD SP!, {...regs...,PC}". 238 * 239 * May modify IP and LR. 240 */ 241.macro LDMFD_PC regs 242 ldmfd sp!, {\regs,pc} 243.endm 244 245 246/* File: armv5te/entry.S */ 247/* 248 * Copyright (C) 2008 The Android Open Source Project 249 * 250 * Licensed under the Apache License, Version 2.0 (the "License"); 251 * you may not use this file except in compliance with the License. 252 * You may obtain a copy of the License at 253 * 254 * http://www.apache.org/licenses/LICENSE-2.0 255 * 256 * Unless required by applicable law or agreed to in writing, software 257 * distributed under the License is distributed on an "AS IS" BASIS, 258 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 259 * See the License for the specific language governing permissions and 260 * limitations under the License. 261 */ 262/* 263 * Interpreter entry point. 264 */ 265 266/* 267 * We don't have formal stack frames, so gdb scans upward in the code 268 * to find the start of the function (a label with the %function type), 269 * and then looks at the next few instructions to figure out what 270 * got pushed onto the stack. From this it figures out how to restore 271 * the registers, including PC, for the previous stack frame. If gdb 272 * sees a non-function label, it stops scanning, so either we need to 273 * have nothing but assembler-local labels between the entry point and 274 * the break, or we need to fake it out. 275 * 276 * When this is defined, we add some stuff to make gdb less confused. 277 */ 278#define ASSIST_DEBUGGER 1 279 280 .text 281 .align 2 282 .global dvmMterpStdRun 283 .type dvmMterpStdRun, %function 284 285/* 286 * On entry: 287 * r0 MterpGlue* glue 288 * 289 * This function returns a boolean "changeInterp" value. The return comes 290 * via a call to dvmMterpStdBail(). 291 */ 292dvmMterpStdRun: 293#define MTERP_ENTRY1 \ 294 .save {r4-r10,fp,lr}; \ 295 stmfd sp!, {r4-r10,fp,lr} @ save 9 regs 296#define MTERP_ENTRY2 \ 297 .pad #4; \ 298 sub sp, sp, #4 @ align 64 299 300 .fnstart 301 MTERP_ENTRY1 302 MTERP_ENTRY2 303 304 /* save stack pointer, add magic word for debuggerd */ 305 str sp, [r0, #offGlue_bailPtr] @ save SP for eventual return 306 307 /* set up "named" registers, figure out entry point */ 308 mov rGLUE, r0 @ set rGLUE 309 ldr r1, [r0, #offGlue_entryPoint] @ enum is 4 bytes in aapcs-EABI 310 LOAD_PC_FP_FROM_GLUE() @ load rPC and rFP from "glue" 311 adr rIBASE, dvmAsmInstructionStart @ set rIBASE 312 cmp r1, #kInterpEntryInstr @ usual case? 313 bne .Lnot_instr @ no, handle it 314 315#if defined(WITH_JIT) 316.LentryInstr: 317 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 318 /* Entry is always a possible trace start */ 319 GET_JIT_PROF_TABLE(r0) 320 FETCH_INST() 321 mov r1, #0 @ prepare the value for the new state 322 str r1, [r10, #offThread_inJitCodeCache] @ back to the interp land 323 cmp r0,#0 324 bne common_updateProfile 325 GET_INST_OPCODE(ip) 326 GOTO_OPCODE(ip) 327#else 328 /* start executing the instruction at rPC */ 329 FETCH_INST() @ load rINST from rPC 330 GET_INST_OPCODE(ip) @ extract opcode from rINST 331 GOTO_OPCODE(ip) @ jump to next instruction 332#endif 333 334.Lnot_instr: 335 cmp r1, #kInterpEntryReturn @ were we returning from a method? 336 beq common_returnFromMethod 337 338.Lnot_return: 339 cmp r1, #kInterpEntryThrow @ were we throwing an exception? 340 beq common_exceptionThrown 341 342#if defined(WITH_JIT) 343.Lnot_throw: 344 ldr r10,[rGLUE, #offGlue_jitResumeNPC] 345 ldr r2,[rGLUE, #offGlue_jitResumeDPC] 346 cmp r1, #kInterpEntryResume @ resuming after Jit single-step? 347 bne .Lbad_arg 348 cmp rPC,r2 349 bne .LentryInstr @ must have branched, don't resume 350#if defined(WITH_SELF_VERIFICATION) 351 @ glue->entryPoint will be set in dvmSelfVerificationSaveState 352 b jitSVShadowRunStart @ re-enter the translation after the 353 @ single-stepped instruction 354 @noreturn 355#endif 356 mov r1, #kInterpEntryInstr 357 str r1, [rGLUE, #offGlue_entryPoint] 358 bx r10 @ re-enter the translation 359#endif 360 361.Lbad_arg: 362 ldr r0, strBadEntryPoint 363 @ r1 holds value of entryPoint 364 bl printf 365 bl dvmAbort 366 .fnend 367 368 369 .global dvmMterpStdBail 370 .type dvmMterpStdBail, %function 371 372/* 373 * Restore the stack pointer and PC from the save point established on entry. 374 * This is essentially the same as a longjmp, but should be cheaper. The 375 * last instruction causes us to return to whoever called dvmMterpStdRun. 376 * 377 * We pushed some registers on the stack in dvmMterpStdRun, then saved 378 * SP and LR. Here we restore SP, restore the registers, and then restore 379 * LR to PC. 380 * 381 * On entry: 382 * r0 MterpGlue* glue 383 * r1 bool changeInterp 384 */ 385dvmMterpStdBail: 386 ldr sp, [r0, #offGlue_bailPtr] @ sp<- saved SP 387 mov r0, r1 @ return the changeInterp value 388 add sp, sp, #4 @ un-align 64 389 LDMFD_PC "r4-r10,fp" @ restore 9 regs and return 390 391 392/* 393 * String references. 394 */ 395strBadEntryPoint: 396 .word .LstrBadEntryPoint 397 398 399 400 .global dvmAsmInstructionStart 401 .type dvmAsmInstructionStart, %function 402dvmAsmInstructionStart = .L_OP_NOP 403 .text 404 405/* ------------------------------ */ 406 .balign 64 407.L_OP_NOP: /* 0x00 */ 408/* File: armv5te/OP_NOP.S */ 409 FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST 410 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 411 GOTO_OPCODE(ip) @ execute it 412 413#ifdef ASSIST_DEBUGGER 414 /* insert fake function header to help gdb find the stack frame */ 415 .type dalvik_inst, %function 416dalvik_inst: 417 .fnstart 418 MTERP_ENTRY1 419 MTERP_ENTRY2 420 .fnend 421#endif 422 423 424/* ------------------------------ */ 425 .balign 64 426.L_OP_MOVE: /* 0x01 */ 427/* File: armv5te/OP_MOVE.S */ 428 /* for move, move-object, long-to-int */ 429 /* op vA, vB */ 430 mov r1, rINST, lsr #12 @ r1<- B from 15:12 431 mov r0, rINST, lsr #8 @ r0<- A from 11:8 432 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 433 GET_VREG(r2, r1) @ r2<- fp[B] 434 and r0, r0, #15 435 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 436 SET_VREG(r2, r0) @ fp[A]<- r2 437 GOTO_OPCODE(ip) @ execute next instruction 438 439 440/* ------------------------------ */ 441 .balign 64 442.L_OP_MOVE_FROM16: /* 0x02 */ 443/* File: armv5te/OP_MOVE_FROM16.S */ 444 /* for: move/from16, move-object/from16 */ 445 /* op vAA, vBBBB */ 446 FETCH(r1, 1) @ r1<- BBBB 447 mov r0, rINST, lsr #8 @ r0<- AA 448 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 449 GET_VREG(r2, r1) @ r2<- fp[BBBB] 450 GET_INST_OPCODE(ip) @ extract opcode from rINST 451 SET_VREG(r2, r0) @ fp[AA]<- r2 452 GOTO_OPCODE(ip) @ jump to next instruction 453 454 455/* ------------------------------ */ 456 .balign 64 457.L_OP_MOVE_16: /* 0x03 */ 458/* File: armv5te/OP_MOVE_16.S */ 459 /* for: move/16, move-object/16 */ 460 /* op vAAAA, vBBBB */ 461 FETCH(r1, 2) @ r1<- BBBB 462 FETCH(r0, 1) @ r0<- AAAA 463 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 464 GET_VREG(r2, r1) @ r2<- fp[BBBB] 465 GET_INST_OPCODE(ip) @ extract opcode from rINST 466 SET_VREG(r2, r0) @ fp[AAAA]<- r2 467 GOTO_OPCODE(ip) @ jump to next instruction 468 469 470/* ------------------------------ */ 471 .balign 64 472.L_OP_MOVE_WIDE: /* 0x04 */ 473/* File: armv5te/OP_MOVE_WIDE.S */ 474 /* move-wide vA, vB */ 475 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 476 mov r2, rINST, lsr #8 @ r2<- A(+) 477 mov r3, rINST, lsr #12 @ r3<- B 478 and r2, r2, #15 479 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 480 add r2, rFP, r2, lsl #2 @ r2<- &fp[A] 481 ldmia r3, {r0-r1} @ r0/r1<- fp[B] 482 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 483 GET_INST_OPCODE(ip) @ extract opcode from rINST 484 stmia r2, {r0-r1} @ fp[A]<- r0/r1 485 GOTO_OPCODE(ip) @ jump to next instruction 486 487 488/* ------------------------------ */ 489 .balign 64 490.L_OP_MOVE_WIDE_FROM16: /* 0x05 */ 491/* File: armv5te/OP_MOVE_WIDE_FROM16.S */ 492 /* move-wide/from16 vAA, vBBBB */ 493 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 494 FETCH(r3, 1) @ r3<- BBBB 495 mov r2, rINST, lsr #8 @ r2<- AA 496 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 497 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 498 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 499 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 500 GET_INST_OPCODE(ip) @ extract opcode from rINST 501 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 502 GOTO_OPCODE(ip) @ jump to next instruction 503 504 505/* ------------------------------ */ 506 .balign 64 507.L_OP_MOVE_WIDE_16: /* 0x06 */ 508/* File: armv5te/OP_MOVE_WIDE_16.S */ 509 /* move-wide/16 vAAAA, vBBBB */ 510 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 511 FETCH(r3, 2) @ r3<- BBBB 512 FETCH(r2, 1) @ r2<- AAAA 513 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 514 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA] 515 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 516 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 517 GET_INST_OPCODE(ip) @ extract opcode from rINST 518 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1 519 GOTO_OPCODE(ip) @ jump to next instruction 520 521 522/* ------------------------------ */ 523 .balign 64 524.L_OP_MOVE_OBJECT: /* 0x07 */ 525/* File: armv5te/OP_MOVE_OBJECT.S */ 526/* File: armv5te/OP_MOVE.S */ 527 /* for move, move-object, long-to-int */ 528 /* op vA, vB */ 529 mov r1, rINST, lsr #12 @ r1<- B from 15:12 530 mov r0, rINST, lsr #8 @ r0<- A from 11:8 531 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 532 GET_VREG(r2, r1) @ r2<- fp[B] 533 and r0, r0, #15 534 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 535 SET_VREG(r2, r0) @ fp[A]<- r2 536 GOTO_OPCODE(ip) @ execute next instruction 537 538 539 540/* ------------------------------ */ 541 .balign 64 542.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */ 543/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */ 544/* File: armv5te/OP_MOVE_FROM16.S */ 545 /* for: move/from16, move-object/from16 */ 546 /* op vAA, vBBBB */ 547 FETCH(r1, 1) @ r1<- BBBB 548 mov r0, rINST, lsr #8 @ r0<- AA 549 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 550 GET_VREG(r2, r1) @ r2<- fp[BBBB] 551 GET_INST_OPCODE(ip) @ extract opcode from rINST 552 SET_VREG(r2, r0) @ fp[AA]<- r2 553 GOTO_OPCODE(ip) @ jump to next instruction 554 555 556 557/* ------------------------------ */ 558 .balign 64 559.L_OP_MOVE_OBJECT_16: /* 0x09 */ 560/* File: armv5te/OP_MOVE_OBJECT_16.S */ 561/* File: armv5te/OP_MOVE_16.S */ 562 /* for: move/16, move-object/16 */ 563 /* op vAAAA, vBBBB */ 564 FETCH(r1, 2) @ r1<- BBBB 565 FETCH(r0, 1) @ r0<- AAAA 566 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 567 GET_VREG(r2, r1) @ r2<- fp[BBBB] 568 GET_INST_OPCODE(ip) @ extract opcode from rINST 569 SET_VREG(r2, r0) @ fp[AAAA]<- r2 570 GOTO_OPCODE(ip) @ jump to next instruction 571 572 573 574/* ------------------------------ */ 575 .balign 64 576.L_OP_MOVE_RESULT: /* 0x0a */ 577/* File: armv5te/OP_MOVE_RESULT.S */ 578 /* for: move-result, move-result-object */ 579 /* op vAA */ 580 mov r2, rINST, lsr #8 @ r2<- AA 581 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 582 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 583 GET_INST_OPCODE(ip) @ extract opcode from rINST 584 SET_VREG(r0, r2) @ fp[AA]<- r0 585 GOTO_OPCODE(ip) @ jump to next instruction 586 587 588/* ------------------------------ */ 589 .balign 64 590.L_OP_MOVE_RESULT_WIDE: /* 0x0b */ 591/* File: armv5te/OP_MOVE_RESULT_WIDE.S */ 592 /* move-result-wide vAA */ 593 mov r2, rINST, lsr #8 @ r2<- AA 594 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 595 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 596 ldmia r3, {r0-r1} @ r0/r1<- retval.j 597 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 598 GET_INST_OPCODE(ip) @ extract opcode from rINST 599 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 600 GOTO_OPCODE(ip) @ jump to next instruction 601 602 603/* ------------------------------ */ 604 .balign 64 605.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */ 606/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */ 607/* File: armv5te/OP_MOVE_RESULT.S */ 608 /* for: move-result, move-result-object */ 609 /* op vAA */ 610 mov r2, rINST, lsr #8 @ r2<- AA 611 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 612 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 613 GET_INST_OPCODE(ip) @ extract opcode from rINST 614 SET_VREG(r0, r2) @ fp[AA]<- r0 615 GOTO_OPCODE(ip) @ jump to next instruction 616 617 618 619/* ------------------------------ */ 620 .balign 64 621.L_OP_MOVE_EXCEPTION: /* 0x0d */ 622/* File: armv5te/OP_MOVE_EXCEPTION.S */ 623 /* move-exception vAA */ 624 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 625 mov r2, rINST, lsr #8 @ r2<- AA 626 ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass 627 mov r1, #0 @ r1<- 0 628 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 629 SET_VREG(r3, r2) @ fp[AA]<- exception obj 630 GET_INST_OPCODE(ip) @ extract opcode from rINST 631 str r1, [r0, #offThread_exception] @ dvmClearException bypass 632 GOTO_OPCODE(ip) @ jump to next instruction 633 634 635/* ------------------------------ */ 636 .balign 64 637.L_OP_RETURN_VOID: /* 0x0e */ 638/* File: armv5te/OP_RETURN_VOID.S */ 639 b common_returnFromMethod 640 641 642/* ------------------------------ */ 643 .balign 64 644.L_OP_RETURN: /* 0x0f */ 645/* File: armv5te/OP_RETURN.S */ 646 /* 647 * Return a 32-bit value. Copies the return value into the "glue" 648 * structure, then jumps to the return handler. 649 * 650 * for: return, return-object 651 */ 652 /* op vAA */ 653 mov r2, rINST, lsr #8 @ r2<- AA 654 GET_VREG(r0, r2) @ r0<- vAA 655 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 656 b common_returnFromMethod 657 658 659/* ------------------------------ */ 660 .balign 64 661.L_OP_RETURN_WIDE: /* 0x10 */ 662/* File: armv5te/OP_RETURN_WIDE.S */ 663 /* 664 * Return a 64-bit value. Copies the return value into the "glue" 665 * structure, then jumps to the return handler. 666 */ 667 /* return-wide vAA */ 668 mov r2, rINST, lsr #8 @ r2<- AA 669 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 670 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 671 ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1 672 stmia r3, {r0-r1} @ retval<- r0/r1 673 b common_returnFromMethod 674 675 676/* ------------------------------ */ 677 .balign 64 678.L_OP_RETURN_OBJECT: /* 0x11 */ 679/* File: armv5te/OP_RETURN_OBJECT.S */ 680/* File: armv5te/OP_RETURN.S */ 681 /* 682 * Return a 32-bit value. Copies the return value into the "glue" 683 * structure, then jumps to the return handler. 684 * 685 * for: return, return-object 686 */ 687 /* op vAA */ 688 mov r2, rINST, lsr #8 @ r2<- AA 689 GET_VREG(r0, r2) @ r0<- vAA 690 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 691 b common_returnFromMethod 692 693 694 695/* ------------------------------ */ 696 .balign 64 697.L_OP_CONST_4: /* 0x12 */ 698/* File: armv5te/OP_CONST_4.S */ 699 /* const/4 vA, #+B */ 700 mov r1, rINST, lsl #16 @ r1<- Bxxx0000 701 mov r0, rINST, lsr #8 @ r0<- A+ 702 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 703 mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended) 704 and r0, r0, #15 705 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 706 SET_VREG(r1, r0) @ fp[A]<- r1 707 GOTO_OPCODE(ip) @ execute next instruction 708 709 710/* ------------------------------ */ 711 .balign 64 712.L_OP_CONST_16: /* 0x13 */ 713/* File: armv5te/OP_CONST_16.S */ 714 /* const/16 vAA, #+BBBB */ 715 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 716 mov r3, rINST, lsr #8 @ r3<- AA 717 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 718 SET_VREG(r0, r3) @ vAA<- r0 719 GET_INST_OPCODE(ip) @ extract opcode from rINST 720 GOTO_OPCODE(ip) @ jump to next instruction 721 722 723/* ------------------------------ */ 724 .balign 64 725.L_OP_CONST: /* 0x14 */ 726/* File: armv5te/OP_CONST.S */ 727 /* const vAA, #+BBBBbbbb */ 728 mov r3, rINST, lsr #8 @ r3<- AA 729 FETCH(r0, 1) @ r0<- bbbb (low) 730 FETCH(r1, 2) @ r1<- BBBB (high) 731 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 732 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 733 GET_INST_OPCODE(ip) @ extract opcode from rINST 734 SET_VREG(r0, r3) @ vAA<- r0 735 GOTO_OPCODE(ip) @ jump to next instruction 736 737 738/* ------------------------------ */ 739 .balign 64 740.L_OP_CONST_HIGH16: /* 0x15 */ 741/* File: armv5te/OP_CONST_HIGH16.S */ 742 /* const/high16 vAA, #+BBBB0000 */ 743 FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended) 744 mov r3, rINST, lsr #8 @ r3<- AA 745 mov r0, r0, lsl #16 @ r0<- BBBB0000 746 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 747 SET_VREG(r0, r3) @ vAA<- r0 748 GET_INST_OPCODE(ip) @ extract opcode from rINST 749 GOTO_OPCODE(ip) @ jump to next instruction 750 751 752/* ------------------------------ */ 753 .balign 64 754.L_OP_CONST_WIDE_16: /* 0x16 */ 755/* File: armv5te/OP_CONST_WIDE_16.S */ 756 /* const-wide/16 vAA, #+BBBB */ 757 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 758 mov r3, rINST, lsr #8 @ r3<- AA 759 mov r1, r0, asr #31 @ r1<- ssssssss 760 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 761 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 762 GET_INST_OPCODE(ip) @ extract opcode from rINST 763 stmia r3, {r0-r1} @ vAA<- r0/r1 764 GOTO_OPCODE(ip) @ jump to next instruction 765 766 767/* ------------------------------ */ 768 .balign 64 769.L_OP_CONST_WIDE_32: /* 0x17 */ 770/* File: armv5te/OP_CONST_WIDE_32.S */ 771 /* const-wide/32 vAA, #+BBBBbbbb */ 772 FETCH(r0, 1) @ r0<- 0000bbbb (low) 773 mov r3, rINST, lsr #8 @ r3<- AA 774 FETCH_S(r2, 2) @ r2<- ssssBBBB (high) 775 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 776 orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb 777 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 778 mov r1, r0, asr #31 @ r1<- ssssssss 779 GET_INST_OPCODE(ip) @ extract opcode from rINST 780 stmia r3, {r0-r1} @ vAA<- r0/r1 781 GOTO_OPCODE(ip) @ jump to next instruction 782 783 784/* ------------------------------ */ 785 .balign 64 786.L_OP_CONST_WIDE: /* 0x18 */ 787/* File: armv5te/OP_CONST_WIDE.S */ 788 /* const-wide vAA, #+HHHHhhhhBBBBbbbb */ 789 FETCH(r0, 1) @ r0<- bbbb (low) 790 FETCH(r1, 2) @ r1<- BBBB (low middle) 791 FETCH(r2, 3) @ r2<- hhhh (high middle) 792 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word) 793 FETCH(r3, 4) @ r3<- HHHH (high) 794 mov r9, rINST, lsr #8 @ r9<- AA 795 orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word) 796 FETCH_ADVANCE_INST(5) @ advance rPC, load rINST 797 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 798 GET_INST_OPCODE(ip) @ extract opcode from rINST 799 stmia r9, {r0-r1} @ vAA<- r0/r1 800 GOTO_OPCODE(ip) @ jump to next instruction 801 802 803/* ------------------------------ */ 804 .balign 64 805.L_OP_CONST_WIDE_HIGH16: /* 0x19 */ 806/* File: armv5te/OP_CONST_WIDE_HIGH16.S */ 807 /* const-wide/high16 vAA, #+BBBB000000000000 */ 808 FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended) 809 mov r3, rINST, lsr #8 @ r3<- AA 810 mov r0, #0 @ r0<- 00000000 811 mov r1, r1, lsl #16 @ r1<- BBBB0000 812 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 813 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 814 GET_INST_OPCODE(ip) @ extract opcode from rINST 815 stmia r3, {r0-r1} @ vAA<- r0/r1 816 GOTO_OPCODE(ip) @ jump to next instruction 817 818 819/* ------------------------------ */ 820 .balign 64 821.L_OP_CONST_STRING: /* 0x1a */ 822/* File: armv5te/OP_CONST_STRING.S */ 823 /* const/string vAA, String@BBBB */ 824 FETCH(r1, 1) @ r1<- BBBB 825 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 826 mov r9, rINST, lsr #8 @ r9<- AA 827 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 828 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 829 cmp r0, #0 @ not yet resolved? 830 beq .LOP_CONST_STRING_resolve 831 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 832 GET_INST_OPCODE(ip) @ extract opcode from rINST 833 SET_VREG(r0, r9) @ vAA<- r0 834 GOTO_OPCODE(ip) @ jump to next instruction 835 836/* ------------------------------ */ 837 .balign 64 838.L_OP_CONST_STRING_JUMBO: /* 0x1b */ 839/* File: armv5te/OP_CONST_STRING_JUMBO.S */ 840 /* const/string vAA, String@BBBBBBBB */ 841 FETCH(r0, 1) @ r0<- bbbb (low) 842 FETCH(r1, 2) @ r1<- BBBB (high) 843 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 844 mov r9, rINST, lsr #8 @ r9<- AA 845 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 846 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 847 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 848 cmp r0, #0 849 beq .LOP_CONST_STRING_JUMBO_resolve 850 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 851 GET_INST_OPCODE(ip) @ extract opcode from rINST 852 SET_VREG(r0, r9) @ vAA<- r0 853 GOTO_OPCODE(ip) @ jump to next instruction 854 855/* ------------------------------ */ 856 .balign 64 857.L_OP_CONST_CLASS: /* 0x1c */ 858/* File: armv5te/OP_CONST_CLASS.S */ 859 /* const/class vAA, Class@BBBB */ 860 FETCH(r1, 1) @ r1<- BBBB 861 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 862 mov r9, rINST, lsr #8 @ r9<- AA 863 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses 864 ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB] 865 cmp r0, #0 @ not yet resolved? 866 beq .LOP_CONST_CLASS_resolve 867 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 868 GET_INST_OPCODE(ip) @ extract opcode from rINST 869 SET_VREG(r0, r9) @ vAA<- r0 870 GOTO_OPCODE(ip) @ jump to next instruction 871 872/* ------------------------------ */ 873 .balign 64 874.L_OP_MONITOR_ENTER: /* 0x1d */ 875/* File: armv5te/OP_MONITOR_ENTER.S */ 876 /* 877 * Synchronize on an object. 878 */ 879 /* monitor-enter vAA */ 880 mov r2, rINST, lsr #8 @ r2<- AA 881 GET_VREG(r1, r2) @ r1<- vAA (object) 882 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 883 cmp r1, #0 @ null object? 884 EXPORT_PC() @ need for precise GC, MONITOR_TRACKING 885 beq common_errNullObject @ null object, throw an exception 886 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 887 bl dvmLockObject @ call(self, obj) 888#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */ 889 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 890 ldr r1, [r0, #offThread_exception] @ check for exception 891 cmp r1, #0 892 bne common_exceptionThrown @ exception raised, bail out 893#endif 894 GET_INST_OPCODE(ip) @ extract opcode from rINST 895 GOTO_OPCODE(ip) @ jump to next instruction 896 897 898/* ------------------------------ */ 899 .balign 64 900.L_OP_MONITOR_EXIT: /* 0x1e */ 901/* File: armv5te/OP_MONITOR_EXIT.S */ 902 /* 903 * Unlock an object. 904 * 905 * Exceptions that occur when unlocking a monitor need to appear as 906 * if they happened at the following instruction. See the Dalvik 907 * instruction spec. 908 */ 909 /* monitor-exit vAA */ 910 mov r2, rINST, lsr #8 @ r2<- AA 911 EXPORT_PC() @ before fetch: export the PC 912 GET_VREG(r1, r2) @ r1<- vAA (object) 913 cmp r1, #0 @ null object? 914 beq 1f @ yes 915 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 916 bl dvmUnlockObject @ r0<- success for unlock(self, obj) 917 cmp r0, #0 @ failed? 918 FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST 919 beq common_exceptionThrown @ yes, exception is pending 920 GET_INST_OPCODE(ip) @ extract opcode from rINST 921 GOTO_OPCODE(ip) @ jump to next instruction 9221: 923 FETCH_ADVANCE_INST(1) @ advance before throw 924 b common_errNullObject 925 926 927/* ------------------------------ */ 928 .balign 64 929.L_OP_CHECK_CAST: /* 0x1f */ 930/* File: armv5te/OP_CHECK_CAST.S */ 931 /* 932 * Check to see if a cast from one class to another is allowed. 933 */ 934 /* check-cast vAA, class@BBBB */ 935 mov r3, rINST, lsr #8 @ r3<- AA 936 FETCH(r2, 1) @ r2<- BBBB 937 GET_VREG(r9, r3) @ r9<- object 938 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex 939 cmp r9, #0 @ is object null? 940 ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses 941 beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds 942 ldr r1, [r0, r2, lsl #2] @ r1<- resolved class 943 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 944 cmp r1, #0 @ have we resolved this before? 945 beq .LOP_CHECK_CAST_resolve @ not resolved, do it now 946.LOP_CHECK_CAST_resolved: 947 cmp r0, r1 @ same class (trivial success)? 948 bne .LOP_CHECK_CAST_fullcheck @ no, do full check 949.LOP_CHECK_CAST_okay: 950 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 951 GET_INST_OPCODE(ip) @ extract opcode from rINST 952 GOTO_OPCODE(ip) @ jump to next instruction 953 954/* ------------------------------ */ 955 .balign 64 956.L_OP_INSTANCE_OF: /* 0x20 */ 957/* File: armv5te/OP_INSTANCE_OF.S */ 958 /* 959 * Check to see if an object reference is an instance of a class. 960 * 961 * Most common situation is a non-null object, being compared against 962 * an already-resolved class. 963 */ 964 /* instance-of vA, vB, class@CCCC */ 965 mov r3, rINST, lsr #12 @ r3<- B 966 mov r9, rINST, lsr #8 @ r9<- A+ 967 GET_VREG(r0, r3) @ r0<- vB (object) 968 and r9, r9, #15 @ r9<- A 969 cmp r0, #0 @ is object null? 970 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex 971 beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0 972 FETCH(r3, 1) @ r3<- CCCC 973 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses 974 ldr r1, [r2, r3, lsl #2] @ r1<- resolved class 975 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 976 cmp r1, #0 @ have we resolved this before? 977 beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now 978.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class 979 cmp r0, r1 @ same class (trivial success)? 980 beq .LOP_INSTANCE_OF_trivial @ yes, trivial finish 981 b .LOP_INSTANCE_OF_fullcheck @ no, do full check 982 983/* ------------------------------ */ 984 .balign 64 985.L_OP_ARRAY_LENGTH: /* 0x21 */ 986/* File: armv5te/OP_ARRAY_LENGTH.S */ 987 /* 988 * Return the length of an array. 989 */ 990 mov r1, rINST, lsr #12 @ r1<- B 991 mov r2, rINST, lsr #8 @ r2<- A+ 992 GET_VREG(r0, r1) @ r0<- vB (object ref) 993 and r2, r2, #15 @ r2<- A 994 cmp r0, #0 @ is object null? 995 beq common_errNullObject @ yup, fail 996 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 997 ldr r3, [r0, #offArrayObject_length] @ r3<- array length 998 GET_INST_OPCODE(ip) @ extract opcode from rINST 999 SET_VREG(r3, r2) @ vB<- length 1000 GOTO_OPCODE(ip) @ jump to next instruction 1001 1002 1003/* ------------------------------ */ 1004 .balign 64 1005.L_OP_NEW_INSTANCE: /* 0x22 */ 1006/* File: armv5te/OP_NEW_INSTANCE.S */ 1007 /* 1008 * Create a new instance of a class. 1009 */ 1010 /* new-instance vAA, class@BBBB */ 1011 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1012 FETCH(r1, 1) @ r1<- BBBB 1013 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1014 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1015 EXPORT_PC() @ req'd for init, resolve, alloc 1016 cmp r0, #0 @ already resolved? 1017 beq .LOP_NEW_INSTANCE_resolve @ no, resolve it now 1018.LOP_NEW_INSTANCE_resolved: @ r0=class 1019 ldrb r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum 1020 cmp r1, #CLASS_INITIALIZED @ has class been initialized? 1021 bne .LOP_NEW_INSTANCE_needinit @ no, init class now 1022.LOP_NEW_INSTANCE_initialized: @ r0=class 1023 mov r1, #ALLOC_DONT_TRACK @ flags for alloc call 1024 bl dvmAllocObject @ r0<- new object 1025 b .LOP_NEW_INSTANCE_finish @ continue 1026 1027/* ------------------------------ */ 1028 .balign 64 1029.L_OP_NEW_ARRAY: /* 0x23 */ 1030/* File: armv5te/OP_NEW_ARRAY.S */ 1031 /* 1032 * Allocate an array of objects, specified with the array class 1033 * and a count. 1034 * 1035 * The verifier guarantees that this is an array class, so we don't 1036 * check for it here. 1037 */ 1038 /* new-array vA, vB, class@CCCC */ 1039 mov r0, rINST, lsr #12 @ r0<- B 1040 FETCH(r2, 1) @ r2<- CCCC 1041 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1042 GET_VREG(r1, r0) @ r1<- vB (array length) 1043 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1044 cmp r1, #0 @ check length 1045 ldr r0, [r3, r2, lsl #2] @ r0<- resolved class 1046 bmi common_errNegativeArraySize @ negative length, bail 1047 cmp r0, #0 @ already resolved? 1048 EXPORT_PC() @ req'd for resolve, alloc 1049 bne .LOP_NEW_ARRAY_finish @ resolved, continue 1050 b .LOP_NEW_ARRAY_resolve @ do resolve now 1051 1052/* ------------------------------ */ 1053 .balign 64 1054.L_OP_FILLED_NEW_ARRAY: /* 0x24 */ 1055/* File: armv5te/OP_FILLED_NEW_ARRAY.S */ 1056 /* 1057 * Create a new array with elements filled from registers. 1058 * 1059 * for: filled-new-array, filled-new-array/range 1060 */ 1061 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 1062 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */ 1063 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1064 FETCH(r1, 1) @ r1<- BBBB 1065 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1066 EXPORT_PC() @ need for resolve and alloc 1067 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1068 mov r10, rINST, lsr #8 @ r10<- AA or BA 1069 cmp r0, #0 @ already resolved? 1070 bne .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on 10718: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 1072 mov r2, #0 @ r2<- false 1073 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 1074 bl dvmResolveClass @ r0<- call(clazz, ref) 1075 cmp r0, #0 @ got null? 1076 beq common_exceptionThrown @ yes, handle exception 1077 b .LOP_FILLED_NEW_ARRAY_continue 1078 1079/* ------------------------------ */ 1080 .balign 64 1081.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */ 1082/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */ 1083/* File: armv5te/OP_FILLED_NEW_ARRAY.S */ 1084 /* 1085 * Create a new array with elements filled from registers. 1086 * 1087 * for: filled-new-array, filled-new-array/range 1088 */ 1089 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 1090 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */ 1091 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1092 FETCH(r1, 1) @ r1<- BBBB 1093 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1094 EXPORT_PC() @ need for resolve and alloc 1095 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1096 mov r10, rINST, lsr #8 @ r10<- AA or BA 1097 cmp r0, #0 @ already resolved? 1098 bne .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on 10998: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 1100 mov r2, #0 @ r2<- false 1101 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 1102 bl dvmResolveClass @ r0<- call(clazz, ref) 1103 cmp r0, #0 @ got null? 1104 beq common_exceptionThrown @ yes, handle exception 1105 b .LOP_FILLED_NEW_ARRAY_RANGE_continue 1106 1107 1108/* ------------------------------ */ 1109 .balign 64 1110.L_OP_FILL_ARRAY_DATA: /* 0x26 */ 1111/* File: armv5te/OP_FILL_ARRAY_DATA.S */ 1112 /* fill-array-data vAA, +BBBBBBBB */ 1113 FETCH(r0, 1) @ r0<- bbbb (lo) 1114 FETCH(r1, 2) @ r1<- BBBB (hi) 1115 mov r3, rINST, lsr #8 @ r3<- AA 1116 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 1117 GET_VREG(r0, r3) @ r0<- vAA (array object) 1118 add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.) 1119 EXPORT_PC(); 1120 bl dvmInterpHandleFillArrayData@ fill the array with predefined data 1121 cmp r0, #0 @ 0 means an exception is thrown 1122 beq common_exceptionThrown @ has exception 1123 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 1124 GET_INST_OPCODE(ip) @ extract opcode from rINST 1125 GOTO_OPCODE(ip) @ jump to next instruction 1126 1127/* ------------------------------ */ 1128 .balign 64 1129.L_OP_THROW: /* 0x27 */ 1130/* File: armv5te/OP_THROW.S */ 1131 /* 1132 * Throw an exception object in the current thread. 1133 */ 1134 /* throw vAA */ 1135 mov r2, rINST, lsr #8 @ r2<- AA 1136 GET_VREG(r1, r2) @ r1<- vAA (exception object) 1137 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 1138 cmp r1, #0 @ null object? 1139 beq common_errNullObject @ yes, throw an NPE instead 1140 @ bypass dvmSetException, just store it 1141 str r1, [r0, #offThread_exception] @ thread->exception<- obj 1142 b common_exceptionThrown 1143 1144 1145/* ------------------------------ */ 1146 .balign 64 1147.L_OP_GOTO: /* 0x28 */ 1148/* File: armv5te/OP_GOTO.S */ 1149 /* 1150 * Unconditional branch, 8-bit offset. 1151 * 1152 * The branch distance is a signed code-unit offset, which we need to 1153 * double to get a byte offset. 1154 */ 1155 /* goto +AA */ 1156 mov r0, rINST, lsl #16 @ r0<- AAxx0000 1157 movs r9, r0, asr #24 @ r9<- ssssssAA (sign-extended) 1158 mov r9, r9, lsl #1 @ r9<- byte offset 1159 bmi common_backwardBranch @ backward branch, do periodic checks 1160#if defined(WITH_JIT) 1161 GET_JIT_PROF_TABLE(r0) 1162 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1163 cmp r0,#0 1164 bne common_updateProfile 1165 GET_INST_OPCODE(ip) @ extract opcode from rINST 1166 GOTO_OPCODE(ip) @ jump to next instruction 1167#else 1168 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1169 GET_INST_OPCODE(ip) @ extract opcode from rINST 1170 GOTO_OPCODE(ip) @ jump to next instruction 1171#endif 1172 1173/* ------------------------------ */ 1174 .balign 64 1175.L_OP_GOTO_16: /* 0x29 */ 1176/* File: armv5te/OP_GOTO_16.S */ 1177 /* 1178 * Unconditional branch, 16-bit offset. 1179 * 1180 * The branch distance is a signed code-unit offset, which we need to 1181 * double to get a byte offset. 1182 */ 1183 /* goto/16 +AAAA */ 1184 FETCH_S(r0, 1) @ r0<- ssssAAAA (sign-extended) 1185 movs r9, r0, asl #1 @ r9<- byte offset, check sign 1186 bmi common_backwardBranch @ backward branch, do periodic checks 1187#if defined(WITH_JIT) 1188 GET_JIT_PROF_TABLE(r0) 1189 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1190 cmp r0,#0 1191 bne common_updateProfile 1192 GET_INST_OPCODE(ip) @ extract opcode from rINST 1193 GOTO_OPCODE(ip) @ jump to next instruction 1194#else 1195 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1196 GET_INST_OPCODE(ip) @ extract opcode from rINST 1197 GOTO_OPCODE(ip) @ jump to next instruction 1198#endif 1199 1200 1201/* ------------------------------ */ 1202 .balign 64 1203.L_OP_GOTO_32: /* 0x2a */ 1204/* File: armv5te/OP_GOTO_32.S */ 1205 /* 1206 * Unconditional branch, 32-bit offset. 1207 * 1208 * The branch distance is a signed code-unit offset, which we need to 1209 * double to get a byte offset. 1210 * 1211 * Unlike most opcodes, this one is allowed to branch to itself, so 1212 * our "backward branch" test must be "<=0" instead of "<0". The ORRS 1213 * instruction doesn't affect the V flag, so we need to clear it 1214 * explicitly. 1215 */ 1216 /* goto/32 +AAAAAAAA */ 1217 FETCH(r0, 1) @ r0<- aaaa (lo) 1218 FETCH(r1, 2) @ r1<- AAAA (hi) 1219 cmp ip, ip @ (clear V flag during stall) 1220 orrs r0, r0, r1, lsl #16 @ r0<- AAAAaaaa, check sign 1221 mov r9, r0, asl #1 @ r9<- byte offset 1222 ble common_backwardBranch @ backward branch, do periodic checks 1223#if defined(WITH_JIT) 1224 GET_JIT_PROF_TABLE(r0) 1225 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1226 cmp r0,#0 1227 bne common_updateProfile 1228 GET_INST_OPCODE(ip) @ extract opcode from rINST 1229 GOTO_OPCODE(ip) @ jump to next instruction 1230#else 1231 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1232 GET_INST_OPCODE(ip) @ extract opcode from rINST 1233 GOTO_OPCODE(ip) @ jump to next instruction 1234#endif 1235 1236/* ------------------------------ */ 1237 .balign 64 1238.L_OP_PACKED_SWITCH: /* 0x2b */ 1239/* File: armv5te/OP_PACKED_SWITCH.S */ 1240 /* 1241 * Handle a packed-switch or sparse-switch instruction. In both cases 1242 * we decode it and hand it off to a helper function. 1243 * 1244 * We don't really expect backward branches in a switch statement, but 1245 * they're perfectly legal, so we check for them here. 1246 * 1247 * for: packed-switch, sparse-switch 1248 */ 1249 /* op vAA, +BBBB */ 1250 FETCH(r0, 1) @ r0<- bbbb (lo) 1251 FETCH(r1, 2) @ r1<- BBBB (hi) 1252 mov r3, rINST, lsr #8 @ r3<- AA 1253 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 1254 GET_VREG(r1, r3) @ r1<- vAA 1255 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2 1256 bl dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset 1257 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign 1258 bmi common_backwardBranch @ backward branch, do periodic checks 1259 beq common_backwardBranch @ (want to use BLE but V is unknown) 1260#if defined(WITH_JIT) 1261 GET_JIT_PROF_TABLE(r0) 1262 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1263 cmp r0,#0 1264 bne common_updateProfile 1265 GET_INST_OPCODE(ip) @ extract opcode from rINST 1266 GOTO_OPCODE(ip) @ jump to next instruction 1267#else 1268 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1269 GET_INST_OPCODE(ip) @ extract opcode from rINST 1270 GOTO_OPCODE(ip) @ jump to next instruction 1271#endif 1272 1273 1274/* ------------------------------ */ 1275 .balign 64 1276.L_OP_SPARSE_SWITCH: /* 0x2c */ 1277/* File: armv5te/OP_SPARSE_SWITCH.S */ 1278/* File: armv5te/OP_PACKED_SWITCH.S */ 1279 /* 1280 * Handle a packed-switch or sparse-switch instruction. In both cases 1281 * we decode it and hand it off to a helper function. 1282 * 1283 * We don't really expect backward branches in a switch statement, but 1284 * they're perfectly legal, so we check for them here. 1285 * 1286 * for: packed-switch, sparse-switch 1287 */ 1288 /* op vAA, +BBBB */ 1289 FETCH(r0, 1) @ r0<- bbbb (lo) 1290 FETCH(r1, 2) @ r1<- BBBB (hi) 1291 mov r3, rINST, lsr #8 @ r3<- AA 1292 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 1293 GET_VREG(r1, r3) @ r1<- vAA 1294 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2 1295 bl dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset 1296 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign 1297 bmi common_backwardBranch @ backward branch, do periodic checks 1298 beq common_backwardBranch @ (want to use BLE but V is unknown) 1299#if defined(WITH_JIT) 1300 GET_JIT_PROF_TABLE(r0) 1301 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1302 cmp r0,#0 1303 bne common_updateProfile 1304 GET_INST_OPCODE(ip) @ extract opcode from rINST 1305 GOTO_OPCODE(ip) @ jump to next instruction 1306#else 1307 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1308 GET_INST_OPCODE(ip) @ extract opcode from rINST 1309 GOTO_OPCODE(ip) @ jump to next instruction 1310#endif 1311 1312 1313 1314/* ------------------------------ */ 1315 .balign 64 1316.L_OP_CMPL_FLOAT: /* 0x2d */ 1317/* File: arm-vfp/OP_CMPL_FLOAT.S */ 1318 /* 1319 * Compare two floating-point values. Puts 0, 1, or -1 into the 1320 * destination register based on the results of the comparison. 1321 * 1322 * int compare(x, y) { 1323 * if (x == y) { 1324 * return 0; 1325 * } else if (x > y) { 1326 * return 1; 1327 * } else if (x < y) { 1328 * return -1; 1329 * } else { 1330 * return -1; 1331 * } 1332 * } 1333 */ 1334 /* op vAA, vBB, vCC */ 1335 FETCH(r0, 1) @ r0<- CCBB 1336 mov r9, rINST, lsr #8 @ r9<- AA 1337 and r2, r0, #255 @ r2<- BB 1338 mov r3, r0, lsr #8 @ r3<- CC 1339 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1340 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1341 flds s0, [r2] @ s0<- vBB 1342 flds s1, [r3] @ s1<- vCC 1343 fcmpes s0, s1 @ compare (vBB, vCC) 1344 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1345 mvn r0, #0 @ r0<- -1 (default) 1346 GET_INST_OPCODE(ip) @ extract opcode from rINST 1347 fmstat @ export status flags 1348 movgt r0, #1 @ (greater than) r1<- 1 1349 moveq r0, #0 @ (equal) r1<- 0 1350 b .LOP_CMPL_FLOAT_finish @ argh 1351 1352 1353/* ------------------------------ */ 1354 .balign 64 1355.L_OP_CMPG_FLOAT: /* 0x2e */ 1356/* File: arm-vfp/OP_CMPG_FLOAT.S */ 1357 /* 1358 * Compare two floating-point values. Puts 0, 1, or -1 into the 1359 * destination register based on the results of the comparison. 1360 * 1361 * int compare(x, y) { 1362 * if (x == y) { 1363 * return 0; 1364 * } else if (x < y) { 1365 * return -1; 1366 * } else if (x > y) { 1367 * return 1; 1368 * } else { 1369 * return 1; 1370 * } 1371 * } 1372 */ 1373 /* op vAA, vBB, vCC */ 1374 FETCH(r0, 1) @ r0<- CCBB 1375 mov r9, rINST, lsr #8 @ r9<- AA 1376 and r2, r0, #255 @ r2<- BB 1377 mov r3, r0, lsr #8 @ r3<- CC 1378 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1379 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1380 flds s0, [r2] @ s0<- vBB 1381 flds s1, [r3] @ s1<- vCC 1382 fcmpes s0, s1 @ compare (vBB, vCC) 1383 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1384 mov r0, #1 @ r0<- 1 (default) 1385 GET_INST_OPCODE(ip) @ extract opcode from rINST 1386 fmstat @ export status flags 1387 mvnmi r0, #0 @ (less than) r1<- -1 1388 moveq r0, #0 @ (equal) r1<- 0 1389 b .LOP_CMPG_FLOAT_finish @ argh 1390 1391 1392/* ------------------------------ */ 1393 .balign 64 1394.L_OP_CMPL_DOUBLE: /* 0x2f */ 1395/* File: arm-vfp/OP_CMPL_DOUBLE.S */ 1396 /* 1397 * Compare two floating-point values. Puts 0, 1, or -1 into the 1398 * destination register based on the results of the comparison. 1399 * 1400 * int compare(x, y) { 1401 * if (x == y) { 1402 * return 0; 1403 * } else if (x > y) { 1404 * return 1; 1405 * } else if (x < y) { 1406 * return -1; 1407 * } else { 1408 * return -1; 1409 * } 1410 * } 1411 */ 1412 /* op vAA, vBB, vCC */ 1413 FETCH(r0, 1) @ r0<- CCBB 1414 mov r9, rINST, lsr #8 @ r9<- AA 1415 and r2, r0, #255 @ r2<- BB 1416 mov r3, r0, lsr #8 @ r3<- CC 1417 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1418 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1419 fldd d0, [r2] @ d0<- vBB 1420 fldd d1, [r3] @ d1<- vCC 1421 fcmped d0, d1 @ compare (vBB, vCC) 1422 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1423 mvn r0, #0 @ r0<- -1 (default) 1424 GET_INST_OPCODE(ip) @ extract opcode from rINST 1425 fmstat @ export status flags 1426 movgt r0, #1 @ (greater than) r1<- 1 1427 moveq r0, #0 @ (equal) r1<- 0 1428 b .LOP_CMPL_DOUBLE_finish @ argh 1429 1430 1431/* ------------------------------ */ 1432 .balign 64 1433.L_OP_CMPG_DOUBLE: /* 0x30 */ 1434/* File: arm-vfp/OP_CMPG_DOUBLE.S */ 1435 /* 1436 * Compare two floating-point values. Puts 0, 1, or -1 into the 1437 * destination register based on the results of the comparison. 1438 * 1439 * int compare(x, y) { 1440 * if (x == y) { 1441 * return 0; 1442 * } else if (x < y) { 1443 * return -1; 1444 * } else if (x > y) { 1445 * return 1; 1446 * } else { 1447 * return 1; 1448 * } 1449 * } 1450 */ 1451 /* op vAA, vBB, vCC */ 1452 FETCH(r0, 1) @ r0<- CCBB 1453 mov r9, rINST, lsr #8 @ r9<- AA 1454 and r2, r0, #255 @ r2<- BB 1455 mov r3, r0, lsr #8 @ r3<- CC 1456 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1457 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1458 fldd d0, [r2] @ d0<- vBB 1459 fldd d1, [r3] @ d1<- vCC 1460 fcmped d0, d1 @ compare (vBB, vCC) 1461 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1462 mov r0, #1 @ r0<- 1 (default) 1463 GET_INST_OPCODE(ip) @ extract opcode from rINST 1464 fmstat @ export status flags 1465 mvnmi r0, #0 @ (less than) r1<- -1 1466 moveq r0, #0 @ (equal) r1<- 0 1467 b .LOP_CMPG_DOUBLE_finish @ argh 1468 1469 1470/* ------------------------------ */ 1471 .balign 64 1472.L_OP_CMP_LONG: /* 0x31 */ 1473/* File: armv5te/OP_CMP_LONG.S */ 1474 /* 1475 * Compare two 64-bit values. Puts 0, 1, or -1 into the destination 1476 * register based on the results of the comparison. 1477 * 1478 * We load the full values with LDM, but in practice many values could 1479 * be resolved by only looking at the high word. This could be made 1480 * faster or slower by splitting the LDM into a pair of LDRs. 1481 * 1482 * If we just wanted to set condition flags, we could do this: 1483 * subs ip, r0, r2 1484 * sbcs ip, r1, r3 1485 * subeqs ip, r0, r2 1486 * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific 1487 * integer value, which we can do with 2 conditional mov/mvn instructions 1488 * (set 1, set -1; if they're equal we already have 0 in ip), giving 1489 * us a constant 5-cycle path plus a branch at the end to the 1490 * instruction epilogue code. The multi-compare approach below needs 1491 * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch 1492 * in the worst case (the 64-bit values are equal). 1493 */ 1494 /* cmp-long vAA, vBB, vCC */ 1495 FETCH(r0, 1) @ r0<- CCBB 1496 mov r9, rINST, lsr #8 @ r9<- AA 1497 and r2, r0, #255 @ r2<- BB 1498 mov r3, r0, lsr #8 @ r3<- CC 1499 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 1500 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 1501 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 1502 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 1503 cmp r1, r3 @ compare (vBB+1, vCC+1) 1504 blt .LOP_CMP_LONG_less @ signed compare on high part 1505 bgt .LOP_CMP_LONG_greater 1506 subs r1, r0, r2 @ r1<- r0 - r2 1507 bhi .LOP_CMP_LONG_greater @ unsigned compare on low part 1508 bne .LOP_CMP_LONG_less 1509 b .LOP_CMP_LONG_finish @ equal; r1 already holds 0 1510 1511/* ------------------------------ */ 1512 .balign 64 1513.L_OP_IF_EQ: /* 0x32 */ 1514/* File: armv5te/OP_IF_EQ.S */ 1515/* File: armv5te/bincmp.S */ 1516 /* 1517 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1518 * fragment that specifies the *reverse* comparison to perform, e.g. 1519 * for "if-le" you would use "gt". 1520 * 1521 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1522 */ 1523 /* if-cmp vA, vB, +CCCC */ 1524 mov r0, rINST, lsr #8 @ r0<- A+ 1525 mov r1, rINST, lsr #12 @ r1<- B 1526 and r0, r0, #15 1527 GET_VREG(r3, r1) @ r3<- vB 1528 GET_VREG(r2, r0) @ r2<- vA 1529 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1530 cmp r2, r3 @ compare (vA, vB) 1531 bne 1f @ branch to 1 if comparison failed 1532 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1533 movs r9, r9, asl #1 @ convert to bytes, check sign 1534 bmi common_backwardBranch @ yes, do periodic checks 15351: 1536#if defined(WITH_JIT) 1537 GET_JIT_PROF_TABLE(r0) 1538 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1539 b common_testUpdateProfile 1540#else 1541 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1542 GET_INST_OPCODE(ip) @ extract opcode from rINST 1543 GOTO_OPCODE(ip) @ jump to next instruction 1544#endif 1545 1546 1547 1548/* ------------------------------ */ 1549 .balign 64 1550.L_OP_IF_NE: /* 0x33 */ 1551/* File: armv5te/OP_IF_NE.S */ 1552/* File: armv5te/bincmp.S */ 1553 /* 1554 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1555 * fragment that specifies the *reverse* comparison to perform, e.g. 1556 * for "if-le" you would use "gt". 1557 * 1558 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1559 */ 1560 /* if-cmp vA, vB, +CCCC */ 1561 mov r0, rINST, lsr #8 @ r0<- A+ 1562 mov r1, rINST, lsr #12 @ r1<- B 1563 and r0, r0, #15 1564 GET_VREG(r3, r1) @ r3<- vB 1565 GET_VREG(r2, r0) @ r2<- vA 1566 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1567 cmp r2, r3 @ compare (vA, vB) 1568 beq 1f @ branch to 1 if comparison failed 1569 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1570 movs r9, r9, asl #1 @ convert to bytes, check sign 1571 bmi common_backwardBranch @ yes, do periodic checks 15721: 1573#if defined(WITH_JIT) 1574 GET_JIT_PROF_TABLE(r0) 1575 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1576 b common_testUpdateProfile 1577#else 1578 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1579 GET_INST_OPCODE(ip) @ extract opcode from rINST 1580 GOTO_OPCODE(ip) @ jump to next instruction 1581#endif 1582 1583 1584 1585/* ------------------------------ */ 1586 .balign 64 1587.L_OP_IF_LT: /* 0x34 */ 1588/* File: armv5te/OP_IF_LT.S */ 1589/* File: armv5te/bincmp.S */ 1590 /* 1591 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1592 * fragment that specifies the *reverse* comparison to perform, e.g. 1593 * for "if-le" you would use "gt". 1594 * 1595 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1596 */ 1597 /* if-cmp vA, vB, +CCCC */ 1598 mov r0, rINST, lsr #8 @ r0<- A+ 1599 mov r1, rINST, lsr #12 @ r1<- B 1600 and r0, r0, #15 1601 GET_VREG(r3, r1) @ r3<- vB 1602 GET_VREG(r2, r0) @ r2<- vA 1603 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1604 cmp r2, r3 @ compare (vA, vB) 1605 bge 1f @ branch to 1 if comparison failed 1606 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1607 movs r9, r9, asl #1 @ convert to bytes, check sign 1608 bmi common_backwardBranch @ yes, do periodic checks 16091: 1610#if defined(WITH_JIT) 1611 GET_JIT_PROF_TABLE(r0) 1612 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1613 b common_testUpdateProfile 1614#else 1615 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1616 GET_INST_OPCODE(ip) @ extract opcode from rINST 1617 GOTO_OPCODE(ip) @ jump to next instruction 1618#endif 1619 1620 1621 1622/* ------------------------------ */ 1623 .balign 64 1624.L_OP_IF_GE: /* 0x35 */ 1625/* File: armv5te/OP_IF_GE.S */ 1626/* File: armv5te/bincmp.S */ 1627 /* 1628 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1629 * fragment that specifies the *reverse* comparison to perform, e.g. 1630 * for "if-le" you would use "gt". 1631 * 1632 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1633 */ 1634 /* if-cmp vA, vB, +CCCC */ 1635 mov r0, rINST, lsr #8 @ r0<- A+ 1636 mov r1, rINST, lsr #12 @ r1<- B 1637 and r0, r0, #15 1638 GET_VREG(r3, r1) @ r3<- vB 1639 GET_VREG(r2, r0) @ r2<- vA 1640 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1641 cmp r2, r3 @ compare (vA, vB) 1642 blt 1f @ branch to 1 if comparison failed 1643 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1644 movs r9, r9, asl #1 @ convert to bytes, check sign 1645 bmi common_backwardBranch @ yes, do periodic checks 16461: 1647#if defined(WITH_JIT) 1648 GET_JIT_PROF_TABLE(r0) 1649 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1650 b common_testUpdateProfile 1651#else 1652 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1653 GET_INST_OPCODE(ip) @ extract opcode from rINST 1654 GOTO_OPCODE(ip) @ jump to next instruction 1655#endif 1656 1657 1658 1659/* ------------------------------ */ 1660 .balign 64 1661.L_OP_IF_GT: /* 0x36 */ 1662/* File: armv5te/OP_IF_GT.S */ 1663/* File: armv5te/bincmp.S */ 1664 /* 1665 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1666 * fragment that specifies the *reverse* comparison to perform, e.g. 1667 * for "if-le" you would use "gt". 1668 * 1669 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1670 */ 1671 /* if-cmp vA, vB, +CCCC */ 1672 mov r0, rINST, lsr #8 @ r0<- A+ 1673 mov r1, rINST, lsr #12 @ r1<- B 1674 and r0, r0, #15 1675 GET_VREG(r3, r1) @ r3<- vB 1676 GET_VREG(r2, r0) @ r2<- vA 1677 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1678 cmp r2, r3 @ compare (vA, vB) 1679 ble 1f @ branch to 1 if comparison failed 1680 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1681 movs r9, r9, asl #1 @ convert to bytes, check sign 1682 bmi common_backwardBranch @ yes, do periodic checks 16831: 1684#if defined(WITH_JIT) 1685 GET_JIT_PROF_TABLE(r0) 1686 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1687 b common_testUpdateProfile 1688#else 1689 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1690 GET_INST_OPCODE(ip) @ extract opcode from rINST 1691 GOTO_OPCODE(ip) @ jump to next instruction 1692#endif 1693 1694 1695 1696/* ------------------------------ */ 1697 .balign 64 1698.L_OP_IF_LE: /* 0x37 */ 1699/* File: armv5te/OP_IF_LE.S */ 1700/* File: armv5te/bincmp.S */ 1701 /* 1702 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1703 * fragment that specifies the *reverse* comparison to perform, e.g. 1704 * for "if-le" you would use "gt". 1705 * 1706 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1707 */ 1708 /* if-cmp vA, vB, +CCCC */ 1709 mov r0, rINST, lsr #8 @ r0<- A+ 1710 mov r1, rINST, lsr #12 @ r1<- B 1711 and r0, r0, #15 1712 GET_VREG(r3, r1) @ r3<- vB 1713 GET_VREG(r2, r0) @ r2<- vA 1714 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1715 cmp r2, r3 @ compare (vA, vB) 1716 bgt 1f @ branch to 1 if comparison failed 1717 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1718 movs r9, r9, asl #1 @ convert to bytes, check sign 1719 bmi common_backwardBranch @ yes, do periodic checks 17201: 1721#if defined(WITH_JIT) 1722 GET_JIT_PROF_TABLE(r0) 1723 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1724 b common_testUpdateProfile 1725#else 1726 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1727 GET_INST_OPCODE(ip) @ extract opcode from rINST 1728 GOTO_OPCODE(ip) @ jump to next instruction 1729#endif 1730 1731 1732 1733/* ------------------------------ */ 1734 .balign 64 1735.L_OP_IF_EQZ: /* 0x38 */ 1736/* File: armv5te/OP_IF_EQZ.S */ 1737/* File: armv5te/zcmp.S */ 1738 /* 1739 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1740 * fragment that specifies the *reverse* comparison to perform, e.g. 1741 * for "if-le" you would use "gt". 1742 * 1743 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1744 */ 1745 /* if-cmp vAA, +BBBB */ 1746 mov r0, rINST, lsr #8 @ r0<- AA 1747 GET_VREG(r2, r0) @ r2<- vAA 1748 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1749 cmp r2, #0 @ compare (vA, 0) 1750 bne 1f @ branch to 1 if comparison failed 1751 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1752 movs r9, r9, asl #1 @ convert to bytes, check sign 1753 bmi common_backwardBranch @ backward branch, do periodic checks 17541: 1755#if defined(WITH_JIT) 1756 GET_JIT_PROF_TABLE(r0) 1757 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1758 cmp r0,#0 1759 bne common_updateProfile 1760 GET_INST_OPCODE(ip) @ extract opcode from rINST 1761 GOTO_OPCODE(ip) @ jump to next instruction 1762#else 1763 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1764 GET_INST_OPCODE(ip) @ extract opcode from rINST 1765 GOTO_OPCODE(ip) @ jump to next instruction 1766#endif 1767 1768 1769 1770/* ------------------------------ */ 1771 .balign 64 1772.L_OP_IF_NEZ: /* 0x39 */ 1773/* File: armv5te/OP_IF_NEZ.S */ 1774/* File: armv5te/zcmp.S */ 1775 /* 1776 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1777 * fragment that specifies the *reverse* comparison to perform, e.g. 1778 * for "if-le" you would use "gt". 1779 * 1780 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1781 */ 1782 /* if-cmp vAA, +BBBB */ 1783 mov r0, rINST, lsr #8 @ r0<- AA 1784 GET_VREG(r2, r0) @ r2<- vAA 1785 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1786 cmp r2, #0 @ compare (vA, 0) 1787 beq 1f @ branch to 1 if comparison failed 1788 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1789 movs r9, r9, asl #1 @ convert to bytes, check sign 1790 bmi common_backwardBranch @ backward branch, do periodic checks 17911: 1792#if defined(WITH_JIT) 1793 GET_JIT_PROF_TABLE(r0) 1794 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1795 cmp r0,#0 1796 bne common_updateProfile 1797 GET_INST_OPCODE(ip) @ extract opcode from rINST 1798 GOTO_OPCODE(ip) @ jump to next instruction 1799#else 1800 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1801 GET_INST_OPCODE(ip) @ extract opcode from rINST 1802 GOTO_OPCODE(ip) @ jump to next instruction 1803#endif 1804 1805 1806 1807/* ------------------------------ */ 1808 .balign 64 1809.L_OP_IF_LTZ: /* 0x3a */ 1810/* File: armv5te/OP_IF_LTZ.S */ 1811/* File: armv5te/zcmp.S */ 1812 /* 1813 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1814 * fragment that specifies the *reverse* comparison to perform, e.g. 1815 * for "if-le" you would use "gt". 1816 * 1817 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1818 */ 1819 /* if-cmp vAA, +BBBB */ 1820 mov r0, rINST, lsr #8 @ r0<- AA 1821 GET_VREG(r2, r0) @ r2<- vAA 1822 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1823 cmp r2, #0 @ compare (vA, 0) 1824 bge 1f @ branch to 1 if comparison failed 1825 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1826 movs r9, r9, asl #1 @ convert to bytes, check sign 1827 bmi common_backwardBranch @ backward branch, do periodic checks 18281: 1829#if defined(WITH_JIT) 1830 GET_JIT_PROF_TABLE(r0) 1831 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1832 cmp r0,#0 1833 bne common_updateProfile 1834 GET_INST_OPCODE(ip) @ extract opcode from rINST 1835 GOTO_OPCODE(ip) @ jump to next instruction 1836#else 1837 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1838 GET_INST_OPCODE(ip) @ extract opcode from rINST 1839 GOTO_OPCODE(ip) @ jump to next instruction 1840#endif 1841 1842 1843 1844/* ------------------------------ */ 1845 .balign 64 1846.L_OP_IF_GEZ: /* 0x3b */ 1847/* File: armv5te/OP_IF_GEZ.S */ 1848/* File: armv5te/zcmp.S */ 1849 /* 1850 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1851 * fragment that specifies the *reverse* comparison to perform, e.g. 1852 * for "if-le" you would use "gt". 1853 * 1854 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1855 */ 1856 /* if-cmp vAA, +BBBB */ 1857 mov r0, rINST, lsr #8 @ r0<- AA 1858 GET_VREG(r2, r0) @ r2<- vAA 1859 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1860 cmp r2, #0 @ compare (vA, 0) 1861 blt 1f @ branch to 1 if comparison failed 1862 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1863 movs r9, r9, asl #1 @ convert to bytes, check sign 1864 bmi common_backwardBranch @ backward branch, do periodic checks 18651: 1866#if defined(WITH_JIT) 1867 GET_JIT_PROF_TABLE(r0) 1868 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1869 cmp r0,#0 1870 bne common_updateProfile 1871 GET_INST_OPCODE(ip) @ extract opcode from rINST 1872 GOTO_OPCODE(ip) @ jump to next instruction 1873#else 1874 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1875 GET_INST_OPCODE(ip) @ extract opcode from rINST 1876 GOTO_OPCODE(ip) @ jump to next instruction 1877#endif 1878 1879 1880 1881/* ------------------------------ */ 1882 .balign 64 1883.L_OP_IF_GTZ: /* 0x3c */ 1884/* File: armv5te/OP_IF_GTZ.S */ 1885/* File: armv5te/zcmp.S */ 1886 /* 1887 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1888 * fragment that specifies the *reverse* comparison to perform, e.g. 1889 * for "if-le" you would use "gt". 1890 * 1891 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1892 */ 1893 /* if-cmp vAA, +BBBB */ 1894 mov r0, rINST, lsr #8 @ r0<- AA 1895 GET_VREG(r2, r0) @ r2<- vAA 1896 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1897 cmp r2, #0 @ compare (vA, 0) 1898 ble 1f @ branch to 1 if comparison failed 1899 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1900 movs r9, r9, asl #1 @ convert to bytes, check sign 1901 bmi common_backwardBranch @ backward branch, do periodic checks 19021: 1903#if defined(WITH_JIT) 1904 GET_JIT_PROF_TABLE(r0) 1905 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1906 cmp r0,#0 1907 bne common_updateProfile 1908 GET_INST_OPCODE(ip) @ extract opcode from rINST 1909 GOTO_OPCODE(ip) @ jump to next instruction 1910#else 1911 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1912 GET_INST_OPCODE(ip) @ extract opcode from rINST 1913 GOTO_OPCODE(ip) @ jump to next instruction 1914#endif 1915 1916 1917 1918/* ------------------------------ */ 1919 .balign 64 1920.L_OP_IF_LEZ: /* 0x3d */ 1921/* File: armv5te/OP_IF_LEZ.S */ 1922/* File: armv5te/zcmp.S */ 1923 /* 1924 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1925 * fragment that specifies the *reverse* comparison to perform, e.g. 1926 * for "if-le" you would use "gt". 1927 * 1928 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1929 */ 1930 /* if-cmp vAA, +BBBB */ 1931 mov r0, rINST, lsr #8 @ r0<- AA 1932 GET_VREG(r2, r0) @ r2<- vAA 1933 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1934 cmp r2, #0 @ compare (vA, 0) 1935 bgt 1f @ branch to 1 if comparison failed 1936 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1937 movs r9, r9, asl #1 @ convert to bytes, check sign 1938 bmi common_backwardBranch @ backward branch, do periodic checks 19391: 1940#if defined(WITH_JIT) 1941 GET_JIT_PROF_TABLE(r0) 1942 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1943 cmp r0,#0 1944 bne common_updateProfile 1945 GET_INST_OPCODE(ip) @ extract opcode from rINST 1946 GOTO_OPCODE(ip) @ jump to next instruction 1947#else 1948 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1949 GET_INST_OPCODE(ip) @ extract opcode from rINST 1950 GOTO_OPCODE(ip) @ jump to next instruction 1951#endif 1952 1953 1954 1955/* ------------------------------ */ 1956 .balign 64 1957.L_OP_UNUSED_3E: /* 0x3e */ 1958/* File: armv5te/OP_UNUSED_3E.S */ 1959/* File: armv5te/unused.S */ 1960 bl common_abort 1961 1962 1963 1964/* ------------------------------ */ 1965 .balign 64 1966.L_OP_UNUSED_3F: /* 0x3f */ 1967/* File: armv5te/OP_UNUSED_3F.S */ 1968/* File: armv5te/unused.S */ 1969 bl common_abort 1970 1971 1972 1973/* ------------------------------ */ 1974 .balign 64 1975.L_OP_UNUSED_40: /* 0x40 */ 1976/* File: armv5te/OP_UNUSED_40.S */ 1977/* File: armv5te/unused.S */ 1978 bl common_abort 1979 1980 1981 1982/* ------------------------------ */ 1983 .balign 64 1984.L_OP_UNUSED_41: /* 0x41 */ 1985/* File: armv5te/OP_UNUSED_41.S */ 1986/* File: armv5te/unused.S */ 1987 bl common_abort 1988 1989 1990 1991/* ------------------------------ */ 1992 .balign 64 1993.L_OP_UNUSED_42: /* 0x42 */ 1994/* File: armv5te/OP_UNUSED_42.S */ 1995/* File: armv5te/unused.S */ 1996 bl common_abort 1997 1998 1999 2000/* ------------------------------ */ 2001 .balign 64 2002.L_OP_UNUSED_43: /* 0x43 */ 2003/* File: armv5te/OP_UNUSED_43.S */ 2004/* File: armv5te/unused.S */ 2005 bl common_abort 2006 2007 2008 2009/* ------------------------------ */ 2010 .balign 64 2011.L_OP_AGET: /* 0x44 */ 2012/* File: armv5te/OP_AGET.S */ 2013 /* 2014 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2015 * 2016 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2017 * instructions. We use a pair of FETCH_Bs instead. 2018 * 2019 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2020 */ 2021 /* op vAA, vBB, vCC */ 2022 FETCH_B(r2, 1, 0) @ r2<- BB 2023 mov r9, rINST, lsr #8 @ r9<- AA 2024 FETCH_B(r3, 1, 1) @ r3<- CC 2025 GET_VREG(r0, r2) @ r0<- vBB (array object) 2026 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2027 cmp r0, #0 @ null array object? 2028 beq common_errNullObject @ yes, bail 2029 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2030 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2031 cmp r1, r3 @ compare unsigned index, length 2032 bcs common_errArrayIndex @ index >= length, bail 2033 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2034 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2035 GET_INST_OPCODE(ip) @ extract opcode from rINST 2036 SET_VREG(r2, r9) @ vAA<- r2 2037 GOTO_OPCODE(ip) @ jump to next instruction 2038 2039 2040/* ------------------------------ */ 2041 .balign 64 2042.L_OP_AGET_WIDE: /* 0x45 */ 2043/* File: armv5te/OP_AGET_WIDE.S */ 2044 /* 2045 * Array get, 64 bits. vAA <- vBB[vCC]. 2046 * 2047 * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD. 2048 */ 2049 /* aget-wide vAA, vBB, vCC */ 2050 FETCH(r0, 1) @ r0<- CCBB 2051 mov r9, rINST, lsr #8 @ r9<- AA 2052 and r2, r0, #255 @ r2<- BB 2053 mov r3, r0, lsr #8 @ r3<- CC 2054 GET_VREG(r0, r2) @ r0<- vBB (array object) 2055 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2056 cmp r0, #0 @ null array object? 2057 beq common_errNullObject @ yes, bail 2058 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2059 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2060 cmp r1, r3 @ compare unsigned index, length 2061 bcc .LOP_AGET_WIDE_finish @ okay, continue below 2062 b common_errArrayIndex @ index >= length, bail 2063 @ May want to swap the order of these two branches depending on how the 2064 @ branch prediction (if any) handles conditional forward branches vs. 2065 @ unconditional forward branches. 2066 2067/* ------------------------------ */ 2068 .balign 64 2069.L_OP_AGET_OBJECT: /* 0x46 */ 2070/* File: armv5te/OP_AGET_OBJECT.S */ 2071/* File: armv5te/OP_AGET.S */ 2072 /* 2073 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2074 * 2075 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2076 * instructions. We use a pair of FETCH_Bs instead. 2077 * 2078 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2079 */ 2080 /* op vAA, vBB, vCC */ 2081 FETCH_B(r2, 1, 0) @ r2<- BB 2082 mov r9, rINST, lsr #8 @ r9<- AA 2083 FETCH_B(r3, 1, 1) @ r3<- CC 2084 GET_VREG(r0, r2) @ r0<- vBB (array object) 2085 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2086 cmp r0, #0 @ null array object? 2087 beq common_errNullObject @ yes, bail 2088 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2089 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2090 cmp r1, r3 @ compare unsigned index, length 2091 bcs common_errArrayIndex @ index >= length, bail 2092 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2093 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2094 GET_INST_OPCODE(ip) @ extract opcode from rINST 2095 SET_VREG(r2, r9) @ vAA<- r2 2096 GOTO_OPCODE(ip) @ jump to next instruction 2097 2098 2099 2100/* ------------------------------ */ 2101 .balign 64 2102.L_OP_AGET_BOOLEAN: /* 0x47 */ 2103/* File: armv5te/OP_AGET_BOOLEAN.S */ 2104/* File: armv5te/OP_AGET.S */ 2105 /* 2106 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2107 * 2108 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2109 * instructions. We use a pair of FETCH_Bs instead. 2110 * 2111 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2112 */ 2113 /* op vAA, vBB, vCC */ 2114 FETCH_B(r2, 1, 0) @ r2<- BB 2115 mov r9, rINST, lsr #8 @ r9<- AA 2116 FETCH_B(r3, 1, 1) @ r3<- CC 2117 GET_VREG(r0, r2) @ r0<- vBB (array object) 2118 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2119 cmp r0, #0 @ null array object? 2120 beq common_errNullObject @ yes, bail 2121 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2122 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2123 cmp r1, r3 @ compare unsigned index, length 2124 bcs common_errArrayIndex @ index >= length, bail 2125 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2126 ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2127 GET_INST_OPCODE(ip) @ extract opcode from rINST 2128 SET_VREG(r2, r9) @ vAA<- r2 2129 GOTO_OPCODE(ip) @ jump to next instruction 2130 2131 2132 2133/* ------------------------------ */ 2134 .balign 64 2135.L_OP_AGET_BYTE: /* 0x48 */ 2136/* File: armv5te/OP_AGET_BYTE.S */ 2137/* File: armv5te/OP_AGET.S */ 2138 /* 2139 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2140 * 2141 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2142 * instructions. We use a pair of FETCH_Bs instead. 2143 * 2144 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2145 */ 2146 /* op vAA, vBB, vCC */ 2147 FETCH_B(r2, 1, 0) @ r2<- BB 2148 mov r9, rINST, lsr #8 @ r9<- AA 2149 FETCH_B(r3, 1, 1) @ r3<- CC 2150 GET_VREG(r0, r2) @ r0<- vBB (array object) 2151 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2152 cmp r0, #0 @ null array object? 2153 beq common_errNullObject @ yes, bail 2154 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2155 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2156 cmp r1, r3 @ compare unsigned index, length 2157 bcs common_errArrayIndex @ index >= length, bail 2158 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2159 ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2160 GET_INST_OPCODE(ip) @ extract opcode from rINST 2161 SET_VREG(r2, r9) @ vAA<- r2 2162 GOTO_OPCODE(ip) @ jump to next instruction 2163 2164 2165 2166/* ------------------------------ */ 2167 .balign 64 2168.L_OP_AGET_CHAR: /* 0x49 */ 2169/* File: armv5te/OP_AGET_CHAR.S */ 2170/* File: armv5te/OP_AGET.S */ 2171 /* 2172 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2173 * 2174 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2175 * instructions. We use a pair of FETCH_Bs instead. 2176 * 2177 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2178 */ 2179 /* op vAA, vBB, vCC */ 2180 FETCH_B(r2, 1, 0) @ r2<- BB 2181 mov r9, rINST, lsr #8 @ r9<- AA 2182 FETCH_B(r3, 1, 1) @ r3<- CC 2183 GET_VREG(r0, r2) @ r0<- vBB (array object) 2184 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2185 cmp r0, #0 @ null array object? 2186 beq common_errNullObject @ yes, bail 2187 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2188 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2189 cmp r1, r3 @ compare unsigned index, length 2190 bcs common_errArrayIndex @ index >= length, bail 2191 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2192 ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2193 GET_INST_OPCODE(ip) @ extract opcode from rINST 2194 SET_VREG(r2, r9) @ vAA<- r2 2195 GOTO_OPCODE(ip) @ jump to next instruction 2196 2197 2198 2199/* ------------------------------ */ 2200 .balign 64 2201.L_OP_AGET_SHORT: /* 0x4a */ 2202/* File: armv5te/OP_AGET_SHORT.S */ 2203/* File: armv5te/OP_AGET.S */ 2204 /* 2205 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2206 * 2207 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2208 * instructions. We use a pair of FETCH_Bs instead. 2209 * 2210 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2211 */ 2212 /* op vAA, vBB, vCC */ 2213 FETCH_B(r2, 1, 0) @ r2<- BB 2214 mov r9, rINST, lsr #8 @ r9<- AA 2215 FETCH_B(r3, 1, 1) @ r3<- CC 2216 GET_VREG(r0, r2) @ r0<- vBB (array object) 2217 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2218 cmp r0, #0 @ null array object? 2219 beq common_errNullObject @ yes, bail 2220 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2221 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2222 cmp r1, r3 @ compare unsigned index, length 2223 bcs common_errArrayIndex @ index >= length, bail 2224 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2225 ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2226 GET_INST_OPCODE(ip) @ extract opcode from rINST 2227 SET_VREG(r2, r9) @ vAA<- r2 2228 GOTO_OPCODE(ip) @ jump to next instruction 2229 2230 2231 2232/* ------------------------------ */ 2233 .balign 64 2234.L_OP_APUT: /* 0x4b */ 2235/* File: armv5te/OP_APUT.S */ 2236 /* 2237 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2238 * 2239 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2240 * instructions. We use a pair of FETCH_Bs instead. 2241 * 2242 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2243 */ 2244 /* op vAA, vBB, vCC */ 2245 FETCH_B(r2, 1, 0) @ r2<- BB 2246 mov r9, rINST, lsr #8 @ r9<- AA 2247 FETCH_B(r3, 1, 1) @ r3<- CC 2248 GET_VREG(r0, r2) @ r0<- vBB (array object) 2249 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2250 cmp r0, #0 @ null array object? 2251 beq common_errNullObject @ yes, bail 2252 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2253 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2254 cmp r1, r3 @ compare unsigned index, length 2255 bcs common_errArrayIndex @ index >= length, bail 2256 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2257 GET_VREG(r2, r9) @ r2<- vAA 2258 GET_INST_OPCODE(ip) @ extract opcode from rINST 2259 str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2260 GOTO_OPCODE(ip) @ jump to next instruction 2261 2262 2263/* ------------------------------ */ 2264 .balign 64 2265.L_OP_APUT_WIDE: /* 0x4c */ 2266/* File: armv5te/OP_APUT_WIDE.S */ 2267 /* 2268 * Array put, 64 bits. vBB[vCC] <- vAA. 2269 * 2270 * Arrays of long/double are 64-bit aligned, so it's okay to use STRD. 2271 */ 2272 /* aput-wide vAA, vBB, vCC */ 2273 FETCH(r0, 1) @ r0<- CCBB 2274 mov r9, rINST, lsr #8 @ r9<- AA 2275 and r2, r0, #255 @ r2<- BB 2276 mov r3, r0, lsr #8 @ r3<- CC 2277 GET_VREG(r0, r2) @ r0<- vBB (array object) 2278 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2279 cmp r0, #0 @ null array object? 2280 beq common_errNullObject @ yes, bail 2281 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2282 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2283 cmp r1, r3 @ compare unsigned index, length 2284 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2285 bcc .LOP_APUT_WIDE_finish @ okay, continue below 2286 b common_errArrayIndex @ index >= length, bail 2287 @ May want to swap the order of these two branches depending on how the 2288 @ branch prediction (if any) handles conditional forward branches vs. 2289 @ unconditional forward branches. 2290 2291/* ------------------------------ */ 2292 .balign 64 2293.L_OP_APUT_OBJECT: /* 0x4d */ 2294/* File: armv5te/OP_APUT_OBJECT.S */ 2295 /* 2296 * Store an object into an array. vBB[vCC] <- vAA. 2297 * 2298 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2299 * instructions. We use a pair of FETCH_Bs instead. 2300 */ 2301 /* op vAA, vBB, vCC */ 2302 FETCH(r0, 1) @ r0<- CCBB 2303 mov r9, rINST, lsr #8 @ r9<- AA 2304 and r2, r0, #255 @ r2<- BB 2305 mov r3, r0, lsr #8 @ r3<- CC 2306 GET_VREG(r1, r2) @ r1<- vBB (array object) 2307 GET_VREG(r0, r3) @ r0<- vCC (requested index) 2308 cmp r1, #0 @ null array object? 2309 GET_VREG(r9, r9) @ r9<- vAA 2310 beq common_errNullObject @ yes, bail 2311 ldr r3, [r1, #offArrayObject_length] @ r3<- arrayObj->length 2312 add r10, r1, r0, lsl #2 @ r10<- arrayObj + index*width 2313 cmp r0, r3 @ compare unsigned index, length 2314 bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on 2315 b common_errArrayIndex @ index >= length, bail 2316 2317 2318/* ------------------------------ */ 2319 .balign 64 2320.L_OP_APUT_BOOLEAN: /* 0x4e */ 2321/* File: armv5te/OP_APUT_BOOLEAN.S */ 2322/* File: armv5te/OP_APUT.S */ 2323 /* 2324 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2325 * 2326 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2327 * instructions. We use a pair of FETCH_Bs instead. 2328 * 2329 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2330 */ 2331 /* op vAA, vBB, vCC */ 2332 FETCH_B(r2, 1, 0) @ r2<- BB 2333 mov r9, rINST, lsr #8 @ r9<- AA 2334 FETCH_B(r3, 1, 1) @ r3<- CC 2335 GET_VREG(r0, r2) @ r0<- vBB (array object) 2336 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2337 cmp r0, #0 @ null array object? 2338 beq common_errNullObject @ yes, bail 2339 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2340 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2341 cmp r1, r3 @ compare unsigned index, length 2342 bcs common_errArrayIndex @ index >= length, bail 2343 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2344 GET_VREG(r2, r9) @ r2<- vAA 2345 GET_INST_OPCODE(ip) @ extract opcode from rINST 2346 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2347 GOTO_OPCODE(ip) @ jump to next instruction 2348 2349 2350 2351/* ------------------------------ */ 2352 .balign 64 2353.L_OP_APUT_BYTE: /* 0x4f */ 2354/* File: armv5te/OP_APUT_BYTE.S */ 2355/* File: armv5te/OP_APUT.S */ 2356 /* 2357 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2358 * 2359 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2360 * instructions. We use a pair of FETCH_Bs instead. 2361 * 2362 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2363 */ 2364 /* op vAA, vBB, vCC */ 2365 FETCH_B(r2, 1, 0) @ r2<- BB 2366 mov r9, rINST, lsr #8 @ r9<- AA 2367 FETCH_B(r3, 1, 1) @ r3<- CC 2368 GET_VREG(r0, r2) @ r0<- vBB (array object) 2369 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2370 cmp r0, #0 @ null array object? 2371 beq common_errNullObject @ yes, bail 2372 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2373 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2374 cmp r1, r3 @ compare unsigned index, length 2375 bcs common_errArrayIndex @ index >= length, bail 2376 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2377 GET_VREG(r2, r9) @ r2<- vAA 2378 GET_INST_OPCODE(ip) @ extract opcode from rINST 2379 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2380 GOTO_OPCODE(ip) @ jump to next instruction 2381 2382 2383 2384/* ------------------------------ */ 2385 .balign 64 2386.L_OP_APUT_CHAR: /* 0x50 */ 2387/* File: armv5te/OP_APUT_CHAR.S */ 2388/* File: armv5te/OP_APUT.S */ 2389 /* 2390 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2391 * 2392 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2393 * instructions. We use a pair of FETCH_Bs instead. 2394 * 2395 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2396 */ 2397 /* op vAA, vBB, vCC */ 2398 FETCH_B(r2, 1, 0) @ r2<- BB 2399 mov r9, rINST, lsr #8 @ r9<- AA 2400 FETCH_B(r3, 1, 1) @ r3<- CC 2401 GET_VREG(r0, r2) @ r0<- vBB (array object) 2402 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2403 cmp r0, #0 @ null array object? 2404 beq common_errNullObject @ yes, bail 2405 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2406 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2407 cmp r1, r3 @ compare unsigned index, length 2408 bcs common_errArrayIndex @ index >= length, bail 2409 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2410 GET_VREG(r2, r9) @ r2<- vAA 2411 GET_INST_OPCODE(ip) @ extract opcode from rINST 2412 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2413 GOTO_OPCODE(ip) @ jump to next instruction 2414 2415 2416 2417/* ------------------------------ */ 2418 .balign 64 2419.L_OP_APUT_SHORT: /* 0x51 */ 2420/* File: armv5te/OP_APUT_SHORT.S */ 2421/* File: armv5te/OP_APUT.S */ 2422 /* 2423 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2424 * 2425 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2426 * instructions. We use a pair of FETCH_Bs instead. 2427 * 2428 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2429 */ 2430 /* op vAA, vBB, vCC */ 2431 FETCH_B(r2, 1, 0) @ r2<- BB 2432 mov r9, rINST, lsr #8 @ r9<- AA 2433 FETCH_B(r3, 1, 1) @ r3<- CC 2434 GET_VREG(r0, r2) @ r0<- vBB (array object) 2435 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2436 cmp r0, #0 @ null array object? 2437 beq common_errNullObject @ yes, bail 2438 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2439 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2440 cmp r1, r3 @ compare unsigned index, length 2441 bcs common_errArrayIndex @ index >= length, bail 2442 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2443 GET_VREG(r2, r9) @ r2<- vAA 2444 GET_INST_OPCODE(ip) @ extract opcode from rINST 2445 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2446 GOTO_OPCODE(ip) @ jump to next instruction 2447 2448 2449 2450/* ------------------------------ */ 2451 .balign 64 2452.L_OP_IGET: /* 0x52 */ 2453/* File: armv5te/OP_IGET.S */ 2454 /* 2455 * General 32-bit instance field get. 2456 * 2457 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2458 */ 2459 /* op vA, vB, field@CCCC */ 2460 mov r0, rINST, lsr #12 @ r0<- B 2461 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2462 FETCH(r1, 1) @ r1<- field ref CCCC 2463 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2464 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2465 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2466 cmp r0, #0 @ is resolved entry null? 2467 bne .LOP_IGET_finish @ no, already resolved 24688: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2469 EXPORT_PC() @ resolve() could throw 2470 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2471 bl dvmResolveInstField @ r0<- resolved InstField ptr 2472 cmp r0, #0 2473 bne .LOP_IGET_finish 2474 b common_exceptionThrown 2475 2476/* ------------------------------ */ 2477 .balign 64 2478.L_OP_IGET_WIDE: /* 0x53 */ 2479/* File: armv5te/OP_IGET_WIDE.S */ 2480 /* 2481 * Wide 32-bit instance field get. 2482 */ 2483 /* iget-wide vA, vB, field@CCCC */ 2484 mov r0, rINST, lsr #12 @ r0<- B 2485 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2486 FETCH(r1, 1) @ r1<- field ref CCCC 2487 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2488 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2489 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2490 cmp r0, #0 @ is resolved entry null? 2491 bne .LOP_IGET_WIDE_finish @ no, already resolved 24928: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2493 EXPORT_PC() @ resolve() could throw 2494 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2495 bl dvmResolveInstField @ r0<- resolved InstField ptr 2496 cmp r0, #0 2497 bne .LOP_IGET_WIDE_finish 2498 b common_exceptionThrown 2499 2500/* ------------------------------ */ 2501 .balign 64 2502.L_OP_IGET_OBJECT: /* 0x54 */ 2503/* File: armv5te/OP_IGET_OBJECT.S */ 2504/* File: armv5te/OP_IGET.S */ 2505 /* 2506 * General 32-bit instance field get. 2507 * 2508 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2509 */ 2510 /* op vA, vB, field@CCCC */ 2511 mov r0, rINST, lsr #12 @ r0<- B 2512 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2513 FETCH(r1, 1) @ r1<- field ref CCCC 2514 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2515 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2516 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2517 cmp r0, #0 @ is resolved entry null? 2518 bne .LOP_IGET_OBJECT_finish @ no, already resolved 25198: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2520 EXPORT_PC() @ resolve() could throw 2521 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2522 bl dvmResolveInstField @ r0<- resolved InstField ptr 2523 cmp r0, #0 2524 bne .LOP_IGET_OBJECT_finish 2525 b common_exceptionThrown 2526 2527 2528/* ------------------------------ */ 2529 .balign 64 2530.L_OP_IGET_BOOLEAN: /* 0x55 */ 2531/* File: armv5te/OP_IGET_BOOLEAN.S */ 2532@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" } 2533/* File: armv5te/OP_IGET.S */ 2534 /* 2535 * General 32-bit instance field get. 2536 * 2537 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2538 */ 2539 /* op vA, vB, field@CCCC */ 2540 mov r0, rINST, lsr #12 @ r0<- B 2541 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2542 FETCH(r1, 1) @ r1<- field ref CCCC 2543 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2544 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2545 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2546 cmp r0, #0 @ is resolved entry null? 2547 bne .LOP_IGET_BOOLEAN_finish @ no, already resolved 25488: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2549 EXPORT_PC() @ resolve() could throw 2550 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2551 bl dvmResolveInstField @ r0<- resolved InstField ptr 2552 cmp r0, #0 2553 bne .LOP_IGET_BOOLEAN_finish 2554 b common_exceptionThrown 2555 2556 2557/* ------------------------------ */ 2558 .balign 64 2559.L_OP_IGET_BYTE: /* 0x56 */ 2560/* File: armv5te/OP_IGET_BYTE.S */ 2561@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" } 2562/* File: armv5te/OP_IGET.S */ 2563 /* 2564 * General 32-bit instance field get. 2565 * 2566 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2567 */ 2568 /* op vA, vB, field@CCCC */ 2569 mov r0, rINST, lsr #12 @ r0<- B 2570 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2571 FETCH(r1, 1) @ r1<- field ref CCCC 2572 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2573 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2574 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2575 cmp r0, #0 @ is resolved entry null? 2576 bne .LOP_IGET_BYTE_finish @ no, already resolved 25778: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2578 EXPORT_PC() @ resolve() could throw 2579 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2580 bl dvmResolveInstField @ r0<- resolved InstField ptr 2581 cmp r0, #0 2582 bne .LOP_IGET_BYTE_finish 2583 b common_exceptionThrown 2584 2585 2586/* ------------------------------ */ 2587 .balign 64 2588.L_OP_IGET_CHAR: /* 0x57 */ 2589/* File: armv5te/OP_IGET_CHAR.S */ 2590@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" } 2591/* File: armv5te/OP_IGET.S */ 2592 /* 2593 * General 32-bit instance field get. 2594 * 2595 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2596 */ 2597 /* op vA, vB, field@CCCC */ 2598 mov r0, rINST, lsr #12 @ r0<- B 2599 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2600 FETCH(r1, 1) @ r1<- field ref CCCC 2601 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2602 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2603 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2604 cmp r0, #0 @ is resolved entry null? 2605 bne .LOP_IGET_CHAR_finish @ no, already resolved 26068: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2607 EXPORT_PC() @ resolve() could throw 2608 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2609 bl dvmResolveInstField @ r0<- resolved InstField ptr 2610 cmp r0, #0 2611 bne .LOP_IGET_CHAR_finish 2612 b common_exceptionThrown 2613 2614 2615/* ------------------------------ */ 2616 .balign 64 2617.L_OP_IGET_SHORT: /* 0x58 */ 2618/* File: armv5te/OP_IGET_SHORT.S */ 2619@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" } 2620/* File: armv5te/OP_IGET.S */ 2621 /* 2622 * General 32-bit instance field get. 2623 * 2624 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2625 */ 2626 /* op vA, vB, field@CCCC */ 2627 mov r0, rINST, lsr #12 @ r0<- B 2628 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2629 FETCH(r1, 1) @ r1<- field ref CCCC 2630 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2631 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2632 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2633 cmp r0, #0 @ is resolved entry null? 2634 bne .LOP_IGET_SHORT_finish @ no, already resolved 26358: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2636 EXPORT_PC() @ resolve() could throw 2637 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2638 bl dvmResolveInstField @ r0<- resolved InstField ptr 2639 cmp r0, #0 2640 bne .LOP_IGET_SHORT_finish 2641 b common_exceptionThrown 2642 2643 2644/* ------------------------------ */ 2645 .balign 64 2646.L_OP_IPUT: /* 0x59 */ 2647/* File: armv5te/OP_IPUT.S */ 2648 /* 2649 * General 32-bit instance field put. 2650 * 2651 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2652 */ 2653 /* op vA, vB, field@CCCC */ 2654 mov r0, rINST, lsr #12 @ r0<- B 2655 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2656 FETCH(r1, 1) @ r1<- field ref CCCC 2657 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2658 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2659 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2660 cmp r0, #0 @ is resolved entry null? 2661 bne .LOP_IPUT_finish @ no, already resolved 26628: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2663 EXPORT_PC() @ resolve() could throw 2664 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2665 bl dvmResolveInstField @ r0<- resolved InstField ptr 2666 cmp r0, #0 @ success? 2667 bne .LOP_IPUT_finish @ yes, finish up 2668 b common_exceptionThrown 2669 2670/* ------------------------------ */ 2671 .balign 64 2672.L_OP_IPUT_WIDE: /* 0x5a */ 2673/* File: armv5te/OP_IPUT_WIDE.S */ 2674 /* iput-wide vA, vB, field@CCCC */ 2675 mov r0, rINST, lsr #12 @ r0<- B 2676 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2677 FETCH(r1, 1) @ r1<- field ref CCCC 2678 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2679 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2680 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2681 cmp r0, #0 @ is resolved entry null? 2682 bne .LOP_IPUT_WIDE_finish @ no, already resolved 26838: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2684 EXPORT_PC() @ resolve() could throw 2685 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2686 bl dvmResolveInstField @ r0<- resolved InstField ptr 2687 cmp r0, #0 @ success? 2688 bne .LOP_IPUT_WIDE_finish @ yes, finish up 2689 b common_exceptionThrown 2690 2691/* ------------------------------ */ 2692 .balign 64 2693.L_OP_IPUT_OBJECT: /* 0x5b */ 2694/* File: armv5te/OP_IPUT_OBJECT.S */ 2695/* File: armv5te/OP_IPUT.S */ 2696 /* 2697 * General 32-bit instance field put. 2698 * 2699 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2700 */ 2701 /* op vA, vB, field@CCCC */ 2702 mov r0, rINST, lsr #12 @ r0<- B 2703 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2704 FETCH(r1, 1) @ r1<- field ref CCCC 2705 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2706 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2707 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2708 cmp r0, #0 @ is resolved entry null? 2709 bne .LOP_IPUT_OBJECT_finish @ no, already resolved 27108: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2711 EXPORT_PC() @ resolve() could throw 2712 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2713 bl dvmResolveInstField @ r0<- resolved InstField ptr 2714 cmp r0, #0 @ success? 2715 bne .LOP_IPUT_OBJECT_finish @ yes, finish up 2716 b common_exceptionThrown 2717 2718 2719/* ------------------------------ */ 2720 .balign 64 2721.L_OP_IPUT_BOOLEAN: /* 0x5c */ 2722/* File: armv5te/OP_IPUT_BOOLEAN.S */ 2723@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" } 2724/* File: armv5te/OP_IPUT.S */ 2725 /* 2726 * General 32-bit instance field put. 2727 * 2728 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2729 */ 2730 /* op vA, vB, field@CCCC */ 2731 mov r0, rINST, lsr #12 @ r0<- B 2732 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2733 FETCH(r1, 1) @ r1<- field ref CCCC 2734 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2735 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2736 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2737 cmp r0, #0 @ is resolved entry null? 2738 bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved 27398: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2740 EXPORT_PC() @ resolve() could throw 2741 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2742 bl dvmResolveInstField @ r0<- resolved InstField ptr 2743 cmp r0, #0 @ success? 2744 bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up 2745 b common_exceptionThrown 2746 2747 2748/* ------------------------------ */ 2749 .balign 64 2750.L_OP_IPUT_BYTE: /* 0x5d */ 2751/* File: armv5te/OP_IPUT_BYTE.S */ 2752@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" } 2753/* File: armv5te/OP_IPUT.S */ 2754 /* 2755 * General 32-bit instance field put. 2756 * 2757 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2758 */ 2759 /* op vA, vB, field@CCCC */ 2760 mov r0, rINST, lsr #12 @ r0<- B 2761 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2762 FETCH(r1, 1) @ r1<- field ref CCCC 2763 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2764 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2765 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2766 cmp r0, #0 @ is resolved entry null? 2767 bne .LOP_IPUT_BYTE_finish @ no, already resolved 27688: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2769 EXPORT_PC() @ resolve() could throw 2770 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2771 bl dvmResolveInstField @ r0<- resolved InstField ptr 2772 cmp r0, #0 @ success? 2773 bne .LOP_IPUT_BYTE_finish @ yes, finish up 2774 b common_exceptionThrown 2775 2776 2777/* ------------------------------ */ 2778 .balign 64 2779.L_OP_IPUT_CHAR: /* 0x5e */ 2780/* File: armv5te/OP_IPUT_CHAR.S */ 2781@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" } 2782/* File: armv5te/OP_IPUT.S */ 2783 /* 2784 * General 32-bit instance field put. 2785 * 2786 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2787 */ 2788 /* op vA, vB, field@CCCC */ 2789 mov r0, rINST, lsr #12 @ r0<- B 2790 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2791 FETCH(r1, 1) @ r1<- field ref CCCC 2792 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2793 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2794 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2795 cmp r0, #0 @ is resolved entry null? 2796 bne .LOP_IPUT_CHAR_finish @ no, already resolved 27978: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2798 EXPORT_PC() @ resolve() could throw 2799 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2800 bl dvmResolveInstField @ r0<- resolved InstField ptr 2801 cmp r0, #0 @ success? 2802 bne .LOP_IPUT_CHAR_finish @ yes, finish up 2803 b common_exceptionThrown 2804 2805 2806/* ------------------------------ */ 2807 .balign 64 2808.L_OP_IPUT_SHORT: /* 0x5f */ 2809/* File: armv5te/OP_IPUT_SHORT.S */ 2810@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" } 2811/* File: armv5te/OP_IPUT.S */ 2812 /* 2813 * General 32-bit instance field put. 2814 * 2815 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2816 */ 2817 /* op vA, vB, field@CCCC */ 2818 mov r0, rINST, lsr #12 @ r0<- B 2819 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2820 FETCH(r1, 1) @ r1<- field ref CCCC 2821 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2822 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2823 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2824 cmp r0, #0 @ is resolved entry null? 2825 bne .LOP_IPUT_SHORT_finish @ no, already resolved 28268: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2827 EXPORT_PC() @ resolve() could throw 2828 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2829 bl dvmResolveInstField @ r0<- resolved InstField ptr 2830 cmp r0, #0 @ success? 2831 bne .LOP_IPUT_SHORT_finish @ yes, finish up 2832 b common_exceptionThrown 2833 2834 2835/* ------------------------------ */ 2836 .balign 64 2837.L_OP_SGET: /* 0x60 */ 2838/* File: armv5te/OP_SGET.S */ 2839 /* 2840 * General 32-bit SGET handler. 2841 * 2842 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2843 */ 2844 /* op vAA, field@BBBB */ 2845 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2846 FETCH(r1, 1) @ r1<- field ref BBBB 2847 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2848 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2849 cmp r0, #0 @ is resolved entry null? 2850 beq .LOP_SGET_resolve @ yes, do resolve 2851.LOP_SGET_finish: @ field ptr in r0 2852 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2853 mov r2, rINST, lsr #8 @ r2<- AA 2854 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2855 SET_VREG(r1, r2) @ fp[AA]<- r1 2856 GET_INST_OPCODE(ip) @ extract opcode from rINST 2857 GOTO_OPCODE(ip) @ jump to next instruction 2858 2859/* ------------------------------ */ 2860 .balign 64 2861.L_OP_SGET_WIDE: /* 0x61 */ 2862/* File: armv5te/OP_SGET_WIDE.S */ 2863 /* 2864 * 64-bit SGET handler. 2865 */ 2866 /* sget-wide vAA, field@BBBB */ 2867 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2868 FETCH(r1, 1) @ r1<- field ref BBBB 2869 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2870 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2871 cmp r0, #0 @ is resolved entry null? 2872 beq .LOP_SGET_WIDE_resolve @ yes, do resolve 2873.LOP_SGET_WIDE_finish: 2874 mov r9, rINST, lsr #8 @ r9<- AA 2875 .if 0 2876 add r0, r0, #offStaticField_value @ r0<- pointer to data 2877 bl android_quasiatomic_read_64 @ r0/r1<- contents of field 2878 .else 2879 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 2880 .endif 2881 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2882 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2883 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 2884 GET_INST_OPCODE(ip) @ extract opcode from rINST 2885 GOTO_OPCODE(ip) @ jump to next instruction 2886 2887/* ------------------------------ */ 2888 .balign 64 2889.L_OP_SGET_OBJECT: /* 0x62 */ 2890/* File: armv5te/OP_SGET_OBJECT.S */ 2891/* File: armv5te/OP_SGET.S */ 2892 /* 2893 * General 32-bit SGET handler. 2894 * 2895 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2896 */ 2897 /* op vAA, field@BBBB */ 2898 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2899 FETCH(r1, 1) @ r1<- field ref BBBB 2900 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2901 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2902 cmp r0, #0 @ is resolved entry null? 2903 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve 2904.LOP_SGET_OBJECT_finish: @ field ptr in r0 2905 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2906 mov r2, rINST, lsr #8 @ r2<- AA 2907 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2908 SET_VREG(r1, r2) @ fp[AA]<- r1 2909 GET_INST_OPCODE(ip) @ extract opcode from rINST 2910 GOTO_OPCODE(ip) @ jump to next instruction 2911 2912 2913/* ------------------------------ */ 2914 .balign 64 2915.L_OP_SGET_BOOLEAN: /* 0x63 */ 2916/* File: armv5te/OP_SGET_BOOLEAN.S */ 2917/* File: armv5te/OP_SGET.S */ 2918 /* 2919 * General 32-bit SGET handler. 2920 * 2921 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2922 */ 2923 /* op vAA, field@BBBB */ 2924 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2925 FETCH(r1, 1) @ r1<- field ref BBBB 2926 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2927 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2928 cmp r0, #0 @ is resolved entry null? 2929 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve 2930.LOP_SGET_BOOLEAN_finish: @ field ptr in r0 2931 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2932 mov r2, rINST, lsr #8 @ r2<- AA 2933 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2934 SET_VREG(r1, r2) @ fp[AA]<- r1 2935 GET_INST_OPCODE(ip) @ extract opcode from rINST 2936 GOTO_OPCODE(ip) @ jump to next instruction 2937 2938 2939/* ------------------------------ */ 2940 .balign 64 2941.L_OP_SGET_BYTE: /* 0x64 */ 2942/* File: armv5te/OP_SGET_BYTE.S */ 2943/* File: armv5te/OP_SGET.S */ 2944 /* 2945 * General 32-bit SGET handler. 2946 * 2947 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2948 */ 2949 /* op vAA, field@BBBB */ 2950 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2951 FETCH(r1, 1) @ r1<- field ref BBBB 2952 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2953 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2954 cmp r0, #0 @ is resolved entry null? 2955 beq .LOP_SGET_BYTE_resolve @ yes, do resolve 2956.LOP_SGET_BYTE_finish: @ field ptr in r0 2957 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2958 mov r2, rINST, lsr #8 @ r2<- AA 2959 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2960 SET_VREG(r1, r2) @ fp[AA]<- r1 2961 GET_INST_OPCODE(ip) @ extract opcode from rINST 2962 GOTO_OPCODE(ip) @ jump to next instruction 2963 2964 2965/* ------------------------------ */ 2966 .balign 64 2967.L_OP_SGET_CHAR: /* 0x65 */ 2968/* File: armv5te/OP_SGET_CHAR.S */ 2969/* File: armv5te/OP_SGET.S */ 2970 /* 2971 * General 32-bit SGET handler. 2972 * 2973 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2974 */ 2975 /* op vAA, field@BBBB */ 2976 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2977 FETCH(r1, 1) @ r1<- field ref BBBB 2978 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2979 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2980 cmp r0, #0 @ is resolved entry null? 2981 beq .LOP_SGET_CHAR_resolve @ yes, do resolve 2982.LOP_SGET_CHAR_finish: @ field ptr in r0 2983 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2984 mov r2, rINST, lsr #8 @ r2<- AA 2985 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2986 SET_VREG(r1, r2) @ fp[AA]<- r1 2987 GET_INST_OPCODE(ip) @ extract opcode from rINST 2988 GOTO_OPCODE(ip) @ jump to next instruction 2989 2990 2991/* ------------------------------ */ 2992 .balign 64 2993.L_OP_SGET_SHORT: /* 0x66 */ 2994/* File: armv5te/OP_SGET_SHORT.S */ 2995/* File: armv5te/OP_SGET.S */ 2996 /* 2997 * General 32-bit SGET handler. 2998 * 2999 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 3000 */ 3001 /* op vAA, field@BBBB */ 3002 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3003 FETCH(r1, 1) @ r1<- field ref BBBB 3004 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3005 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3006 cmp r0, #0 @ is resolved entry null? 3007 beq .LOP_SGET_SHORT_resolve @ yes, do resolve 3008.LOP_SGET_SHORT_finish: @ field ptr in r0 3009 ldr r1, [r0, #offStaticField_value] @ r1<- field value 3010 mov r2, rINST, lsr #8 @ r2<- AA 3011 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3012 SET_VREG(r1, r2) @ fp[AA]<- r1 3013 GET_INST_OPCODE(ip) @ extract opcode from rINST 3014 GOTO_OPCODE(ip) @ jump to next instruction 3015 3016 3017/* ------------------------------ */ 3018 .balign 64 3019.L_OP_SPUT: /* 0x67 */ 3020/* File: armv5te/OP_SPUT.S */ 3021 /* 3022 * General 32-bit SPUT handler. 3023 * 3024 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3025 */ 3026 /* op vAA, field@BBBB */ 3027 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3028 FETCH(r1, 1) @ r1<- field ref BBBB 3029 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3030 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3031 cmp r0, #0 @ is resolved entry null? 3032 beq .LOP_SPUT_resolve @ yes, do resolve 3033.LOP_SPUT_finish: @ field ptr in r0 3034 mov r2, rINST, lsr #8 @ r2<- AA 3035 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3036 GET_VREG(r1, r2) @ r1<- fp[AA] 3037 GET_INST_OPCODE(ip) @ extract opcode from rINST 3038 str r1, [r0, #offStaticField_value] @ field<- vAA 3039 GOTO_OPCODE(ip) @ jump to next instruction 3040 3041/* ------------------------------ */ 3042 .balign 64 3043.L_OP_SPUT_WIDE: /* 0x68 */ 3044/* File: armv5te/OP_SPUT_WIDE.S */ 3045 /* 3046 * 64-bit SPUT handler. 3047 */ 3048 /* sput-wide vAA, field@BBBB */ 3049 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 3050 FETCH(r1, 1) @ r1<- field ref BBBB 3051 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 3052 mov r9, rINST, lsr #8 @ r9<- AA 3053 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 3054 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 3055 cmp r2, #0 @ is resolved entry null? 3056 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve 3057.LOP_SPUT_WIDE_finish: @ field ptr in r2, AA in r9 3058 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3059 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 3060 GET_INST_OPCODE(r10) @ extract opcode from rINST 3061 .if 0 3062 add r2, r2, #offStaticField_value @ r2<- pointer to data 3063 bl android_quasiatomic_swap_64 @ stores r0/r1 into addr r2 3064 .else 3065 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 3066 .endif 3067 GOTO_OPCODE(r10) @ jump to next instruction 3068 3069/* ------------------------------ */ 3070 .balign 64 3071.L_OP_SPUT_OBJECT: /* 0x69 */ 3072/* File: armv5te/OP_SPUT_OBJECT.S */ 3073/* File: armv5te/OP_SPUT.S */ 3074 /* 3075 * General 32-bit SPUT handler. 3076 * 3077 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3078 */ 3079 /* op vAA, field@BBBB */ 3080 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3081 FETCH(r1, 1) @ r1<- field ref BBBB 3082 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3083 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3084 cmp r0, #0 @ is resolved entry null? 3085 beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve 3086.LOP_SPUT_OBJECT_finish: @ field ptr in r0 3087 mov r2, rINST, lsr #8 @ r2<- AA 3088 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3089 GET_VREG(r1, r2) @ r1<- fp[AA] 3090 GET_INST_OPCODE(ip) @ extract opcode from rINST 3091 str r1, [r0, #offStaticField_value] @ field<- vAA 3092 GOTO_OPCODE(ip) @ jump to next instruction 3093 3094 3095/* ------------------------------ */ 3096 .balign 64 3097.L_OP_SPUT_BOOLEAN: /* 0x6a */ 3098/* File: armv5te/OP_SPUT_BOOLEAN.S */ 3099/* File: armv5te/OP_SPUT.S */ 3100 /* 3101 * General 32-bit SPUT handler. 3102 * 3103 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3104 */ 3105 /* op vAA, field@BBBB */ 3106 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3107 FETCH(r1, 1) @ r1<- field ref BBBB 3108 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3109 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3110 cmp r0, #0 @ is resolved entry null? 3111 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve 3112.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0 3113 mov r2, rINST, lsr #8 @ r2<- AA 3114 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3115 GET_VREG(r1, r2) @ r1<- fp[AA] 3116 GET_INST_OPCODE(ip) @ extract opcode from rINST 3117 str r1, [r0, #offStaticField_value] @ field<- vAA 3118 GOTO_OPCODE(ip) @ jump to next instruction 3119 3120 3121/* ------------------------------ */ 3122 .balign 64 3123.L_OP_SPUT_BYTE: /* 0x6b */ 3124/* File: armv5te/OP_SPUT_BYTE.S */ 3125/* File: armv5te/OP_SPUT.S */ 3126 /* 3127 * General 32-bit SPUT handler. 3128 * 3129 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3130 */ 3131 /* op vAA, field@BBBB */ 3132 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3133 FETCH(r1, 1) @ r1<- field ref BBBB 3134 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3135 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3136 cmp r0, #0 @ is resolved entry null? 3137 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve 3138.LOP_SPUT_BYTE_finish: @ field ptr in r0 3139 mov r2, rINST, lsr #8 @ r2<- AA 3140 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3141 GET_VREG(r1, r2) @ r1<- fp[AA] 3142 GET_INST_OPCODE(ip) @ extract opcode from rINST 3143 str r1, [r0, #offStaticField_value] @ field<- vAA 3144 GOTO_OPCODE(ip) @ jump to next instruction 3145 3146 3147/* ------------------------------ */ 3148 .balign 64 3149.L_OP_SPUT_CHAR: /* 0x6c */ 3150/* File: armv5te/OP_SPUT_CHAR.S */ 3151/* File: armv5te/OP_SPUT.S */ 3152 /* 3153 * General 32-bit SPUT handler. 3154 * 3155 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3156 */ 3157 /* op vAA, field@BBBB */ 3158 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3159 FETCH(r1, 1) @ r1<- field ref BBBB 3160 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3161 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3162 cmp r0, #0 @ is resolved entry null? 3163 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve 3164.LOP_SPUT_CHAR_finish: @ field ptr in r0 3165 mov r2, rINST, lsr #8 @ r2<- AA 3166 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3167 GET_VREG(r1, r2) @ r1<- fp[AA] 3168 GET_INST_OPCODE(ip) @ extract opcode from rINST 3169 str r1, [r0, #offStaticField_value] @ field<- vAA 3170 GOTO_OPCODE(ip) @ jump to next instruction 3171 3172 3173/* ------------------------------ */ 3174 .balign 64 3175.L_OP_SPUT_SHORT: /* 0x6d */ 3176/* File: armv5te/OP_SPUT_SHORT.S */ 3177/* File: armv5te/OP_SPUT.S */ 3178 /* 3179 * General 32-bit SPUT handler. 3180 * 3181 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3182 */ 3183 /* op vAA, field@BBBB */ 3184 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3185 FETCH(r1, 1) @ r1<- field ref BBBB 3186 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3187 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3188 cmp r0, #0 @ is resolved entry null? 3189 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve 3190.LOP_SPUT_SHORT_finish: @ field ptr in r0 3191 mov r2, rINST, lsr #8 @ r2<- AA 3192 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3193 GET_VREG(r1, r2) @ r1<- fp[AA] 3194 GET_INST_OPCODE(ip) @ extract opcode from rINST 3195 str r1, [r0, #offStaticField_value] @ field<- vAA 3196 GOTO_OPCODE(ip) @ jump to next instruction 3197 3198 3199/* ------------------------------ */ 3200 .balign 64 3201.L_OP_INVOKE_VIRTUAL: /* 0x6e */ 3202/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3203 /* 3204 * Handle a virtual method call. 3205 * 3206 * for: invoke-virtual, invoke-virtual/range 3207 */ 3208 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3209 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3210 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3211 FETCH(r1, 1) @ r1<- BBBB 3212 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3213 FETCH(r10, 2) @ r10<- GFED or CCCC 3214 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3215 .if (!0) 3216 and r10, r10, #15 @ r10<- D (or stays CCCC) 3217 .endif 3218 cmp r0, #0 @ already resolved? 3219 EXPORT_PC() @ must export for invoke 3220 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on 3221 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3222 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3223 mov r2, #METHOD_VIRTUAL @ resolver method type 3224 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3225 cmp r0, #0 @ got null? 3226 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue 3227 b common_exceptionThrown @ yes, handle exception 3228 3229/* ------------------------------ */ 3230 .balign 64 3231.L_OP_INVOKE_SUPER: /* 0x6f */ 3232/* File: armv5te/OP_INVOKE_SUPER.S */ 3233 /* 3234 * Handle a "super" method call. 3235 * 3236 * for: invoke-super, invoke-super/range 3237 */ 3238 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3239 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3240 FETCH(r10, 2) @ r10<- GFED or CCCC 3241 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3242 .if (!0) 3243 and r10, r10, #15 @ r10<- D (or stays CCCC) 3244 .endif 3245 FETCH(r1, 1) @ r1<- BBBB 3246 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3247 GET_VREG(r2, r10) @ r2<- "this" ptr 3248 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3249 cmp r2, #0 @ null "this"? 3250 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3251 beq common_errNullObject @ null "this", throw exception 3252 cmp r0, #0 @ already resolved? 3253 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3254 EXPORT_PC() @ must export for invoke 3255 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on 3256 b .LOP_INVOKE_SUPER_resolve @ do resolve now 3257 3258/* ------------------------------ */ 3259 .balign 64 3260.L_OP_INVOKE_DIRECT: /* 0x70 */ 3261/* File: armv5te/OP_INVOKE_DIRECT.S */ 3262 /* 3263 * Handle a direct method call. 3264 * 3265 * (We could defer the "is 'this' pointer null" test to the common 3266 * method invocation code, and use a flag to indicate that static 3267 * calls don't count. If we do this as part of copying the arguments 3268 * out we could avoiding loading the first arg twice.) 3269 * 3270 * for: invoke-direct, invoke-direct/range 3271 */ 3272 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3273 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3274 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3275 FETCH(r1, 1) @ r1<- BBBB 3276 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3277 FETCH(r10, 2) @ r10<- GFED or CCCC 3278 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3279 .if (!0) 3280 and r10, r10, #15 @ r10<- D (or stays CCCC) 3281 .endif 3282 cmp r0, #0 @ already resolved? 3283 EXPORT_PC() @ must export for invoke 3284 GET_VREG(r2, r10) @ r2<- "this" ptr 3285 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now 3286.LOP_INVOKE_DIRECT_finish: 3287 cmp r2, #0 @ null "this" ref? 3288 bne common_invokeMethodNoRange @ no, continue on 3289 b common_errNullObject @ yes, throw exception 3290 3291/* ------------------------------ */ 3292 .balign 64 3293.L_OP_INVOKE_STATIC: /* 0x71 */ 3294/* File: armv5te/OP_INVOKE_STATIC.S */ 3295 /* 3296 * Handle a static method call. 3297 * 3298 * for: invoke-static, invoke-static/range 3299 */ 3300 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3301 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3302 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3303 FETCH(r1, 1) @ r1<- BBBB 3304 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3305 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3306 cmp r0, #0 @ already resolved? 3307 EXPORT_PC() @ must export for invoke 3308 bne common_invokeMethodNoRange @ yes, continue on 33090: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3310 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3311 mov r2, #METHOD_STATIC @ resolver method type 3312 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3313 cmp r0, #0 @ got null? 3314 bne common_invokeMethodNoRange @ no, continue 3315 b common_exceptionThrown @ yes, handle exception 3316 3317 3318/* ------------------------------ */ 3319 .balign 64 3320.L_OP_INVOKE_INTERFACE: /* 0x72 */ 3321/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3322 /* 3323 * Handle an interface method call. 3324 * 3325 * for: invoke-interface, invoke-interface/range 3326 */ 3327 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3328 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3329 FETCH(r2, 2) @ r2<- FEDC or CCCC 3330 FETCH(r1, 1) @ r1<- BBBB 3331 .if (!0) 3332 and r2, r2, #15 @ r2<- C (or stays CCCC) 3333 .endif 3334 EXPORT_PC() @ must export for invoke 3335 GET_VREG(r0, r2) @ r0<- first arg ("this") 3336 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3337 cmp r0, #0 @ null obj? 3338 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3339 beq common_errNullObject @ yes, fail 3340 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3341 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3342 cmp r0, #0 @ failed? 3343 beq common_exceptionThrown @ yes, handle exception 3344 b common_invokeMethodNoRange @ jump to common handler 3345 3346 3347/* ------------------------------ */ 3348 .balign 64 3349.L_OP_UNUSED_73: /* 0x73 */ 3350/* File: armv5te/OP_UNUSED_73.S */ 3351/* File: armv5te/unused.S */ 3352 bl common_abort 3353 3354 3355 3356/* ------------------------------ */ 3357 .balign 64 3358.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */ 3359/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */ 3360/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3361 /* 3362 * Handle a virtual method call. 3363 * 3364 * for: invoke-virtual, invoke-virtual/range 3365 */ 3366 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3367 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3368 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3369 FETCH(r1, 1) @ r1<- BBBB 3370 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3371 FETCH(r10, 2) @ r10<- GFED or CCCC 3372 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3373 .if (!1) 3374 and r10, r10, #15 @ r10<- D (or stays CCCC) 3375 .endif 3376 cmp r0, #0 @ already resolved? 3377 EXPORT_PC() @ must export for invoke 3378 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on 3379 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3380 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3381 mov r2, #METHOD_VIRTUAL @ resolver method type 3382 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3383 cmp r0, #0 @ got null? 3384 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue 3385 b common_exceptionThrown @ yes, handle exception 3386 3387 3388/* ------------------------------ */ 3389 .balign 64 3390.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */ 3391/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */ 3392/* File: armv5te/OP_INVOKE_SUPER.S */ 3393 /* 3394 * Handle a "super" method call. 3395 * 3396 * for: invoke-super, invoke-super/range 3397 */ 3398 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3399 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3400 FETCH(r10, 2) @ r10<- GFED or CCCC 3401 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3402 .if (!1) 3403 and r10, r10, #15 @ r10<- D (or stays CCCC) 3404 .endif 3405 FETCH(r1, 1) @ r1<- BBBB 3406 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3407 GET_VREG(r2, r10) @ r2<- "this" ptr 3408 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3409 cmp r2, #0 @ null "this"? 3410 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3411 beq common_errNullObject @ null "this", throw exception 3412 cmp r0, #0 @ already resolved? 3413 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3414 EXPORT_PC() @ must export for invoke 3415 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on 3416 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now 3417 3418 3419/* ------------------------------ */ 3420 .balign 64 3421.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */ 3422/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */ 3423/* File: armv5te/OP_INVOKE_DIRECT.S */ 3424 /* 3425 * Handle a direct method call. 3426 * 3427 * (We could defer the "is 'this' pointer null" test to the common 3428 * method invocation code, and use a flag to indicate that static 3429 * calls don't count. If we do this as part of copying the arguments 3430 * out we could avoiding loading the first arg twice.) 3431 * 3432 * for: invoke-direct, invoke-direct/range 3433 */ 3434 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3435 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3436 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3437 FETCH(r1, 1) @ r1<- BBBB 3438 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3439 FETCH(r10, 2) @ r10<- GFED or CCCC 3440 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3441 .if (!1) 3442 and r10, r10, #15 @ r10<- D (or stays CCCC) 3443 .endif 3444 cmp r0, #0 @ already resolved? 3445 EXPORT_PC() @ must export for invoke 3446 GET_VREG(r2, r10) @ r2<- "this" ptr 3447 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now 3448.LOP_INVOKE_DIRECT_RANGE_finish: 3449 cmp r2, #0 @ null "this" ref? 3450 bne common_invokeMethodRange @ no, continue on 3451 b common_errNullObject @ yes, throw exception 3452 3453 3454/* ------------------------------ */ 3455 .balign 64 3456.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */ 3457/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */ 3458/* File: armv5te/OP_INVOKE_STATIC.S */ 3459 /* 3460 * Handle a static method call. 3461 * 3462 * for: invoke-static, invoke-static/range 3463 */ 3464 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3465 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3466 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3467 FETCH(r1, 1) @ r1<- BBBB 3468 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3469 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3470 cmp r0, #0 @ already resolved? 3471 EXPORT_PC() @ must export for invoke 3472 bne common_invokeMethodRange @ yes, continue on 34730: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3474 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3475 mov r2, #METHOD_STATIC @ resolver method type 3476 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3477 cmp r0, #0 @ got null? 3478 bne common_invokeMethodRange @ no, continue 3479 b common_exceptionThrown @ yes, handle exception 3480 3481 3482 3483/* ------------------------------ */ 3484 .balign 64 3485.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */ 3486/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */ 3487/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3488 /* 3489 * Handle an interface method call. 3490 * 3491 * for: invoke-interface, invoke-interface/range 3492 */ 3493 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3494 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3495 FETCH(r2, 2) @ r2<- FEDC or CCCC 3496 FETCH(r1, 1) @ r1<- BBBB 3497 .if (!1) 3498 and r2, r2, #15 @ r2<- C (or stays CCCC) 3499 .endif 3500 EXPORT_PC() @ must export for invoke 3501 GET_VREG(r0, r2) @ r0<- first arg ("this") 3502 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3503 cmp r0, #0 @ null obj? 3504 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3505 beq common_errNullObject @ yes, fail 3506 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3507 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3508 cmp r0, #0 @ failed? 3509 beq common_exceptionThrown @ yes, handle exception 3510 b common_invokeMethodRange @ jump to common handler 3511 3512 3513 3514/* ------------------------------ */ 3515 .balign 64 3516.L_OP_UNUSED_79: /* 0x79 */ 3517/* File: armv5te/OP_UNUSED_79.S */ 3518/* File: armv5te/unused.S */ 3519 bl common_abort 3520 3521 3522 3523/* ------------------------------ */ 3524 .balign 64 3525.L_OP_UNUSED_7A: /* 0x7a */ 3526/* File: armv5te/OP_UNUSED_7A.S */ 3527/* File: armv5te/unused.S */ 3528 bl common_abort 3529 3530 3531 3532/* ------------------------------ */ 3533 .balign 64 3534.L_OP_NEG_INT: /* 0x7b */ 3535/* File: armv5te/OP_NEG_INT.S */ 3536/* File: armv5te/unop.S */ 3537 /* 3538 * Generic 32-bit unary operation. Provide an "instr" line that 3539 * specifies an instruction that performs "result = op r0". 3540 * This could be an ARM instruction or a function call. 3541 * 3542 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3543 * int-to-byte, int-to-char, int-to-short 3544 */ 3545 /* unop vA, vB */ 3546 mov r3, rINST, lsr #12 @ r3<- B 3547 mov r9, rINST, lsr #8 @ r9<- A+ 3548 GET_VREG(r0, r3) @ r0<- vB 3549 and r9, r9, #15 3550 @ optional op; may set condition codes 3551 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3552 rsb r0, r0, #0 @ r0<- op, r0-r3 changed 3553 GET_INST_OPCODE(ip) @ extract opcode from rINST 3554 SET_VREG(r0, r9) @ vAA<- r0 3555 GOTO_OPCODE(ip) @ jump to next instruction 3556 /* 9-10 instructions */ 3557 3558 3559/* ------------------------------ */ 3560 .balign 64 3561.L_OP_NOT_INT: /* 0x7c */ 3562/* File: armv5te/OP_NOT_INT.S */ 3563/* File: armv5te/unop.S */ 3564 /* 3565 * Generic 32-bit unary operation. Provide an "instr" line that 3566 * specifies an instruction that performs "result = op r0". 3567 * This could be an ARM instruction or a function call. 3568 * 3569 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3570 * int-to-byte, int-to-char, int-to-short 3571 */ 3572 /* unop vA, vB */ 3573 mov r3, rINST, lsr #12 @ r3<- B 3574 mov r9, rINST, lsr #8 @ r9<- A+ 3575 GET_VREG(r0, r3) @ r0<- vB 3576 and r9, r9, #15 3577 @ optional op; may set condition codes 3578 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3579 mvn r0, r0 @ r0<- op, r0-r3 changed 3580 GET_INST_OPCODE(ip) @ extract opcode from rINST 3581 SET_VREG(r0, r9) @ vAA<- r0 3582 GOTO_OPCODE(ip) @ jump to next instruction 3583 /* 9-10 instructions */ 3584 3585 3586/* ------------------------------ */ 3587 .balign 64 3588.L_OP_NEG_LONG: /* 0x7d */ 3589/* File: armv5te/OP_NEG_LONG.S */ 3590/* File: armv5te/unopWide.S */ 3591 /* 3592 * Generic 64-bit unary operation. Provide an "instr" line that 3593 * specifies an instruction that performs "result = op r0/r1". 3594 * This could be an ARM instruction or a function call. 3595 * 3596 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3597 */ 3598 /* unop vA, vB */ 3599 mov r9, rINST, lsr #8 @ r9<- A+ 3600 mov r3, rINST, lsr #12 @ r3<- B 3601 and r9, r9, #15 3602 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3603 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3604 ldmia r3, {r0-r1} @ r0/r1<- vAA 3605 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3606 rsbs r0, r0, #0 @ optional op; may set condition codes 3607 rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed 3608 GET_INST_OPCODE(ip) @ extract opcode from rINST 3609 stmia r9, {r0-r1} @ vAA<- r0/r1 3610 GOTO_OPCODE(ip) @ jump to next instruction 3611 /* 12-13 instructions */ 3612 3613 3614 3615/* ------------------------------ */ 3616 .balign 64 3617.L_OP_NOT_LONG: /* 0x7e */ 3618/* File: armv5te/OP_NOT_LONG.S */ 3619/* File: armv5te/unopWide.S */ 3620 /* 3621 * Generic 64-bit unary operation. Provide an "instr" line that 3622 * specifies an instruction that performs "result = op r0/r1". 3623 * This could be an ARM instruction or a function call. 3624 * 3625 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3626 */ 3627 /* unop vA, vB */ 3628 mov r9, rINST, lsr #8 @ r9<- A+ 3629 mov r3, rINST, lsr #12 @ r3<- B 3630 and r9, r9, #15 3631 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3632 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3633 ldmia r3, {r0-r1} @ r0/r1<- vAA 3634 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3635 mvn r0, r0 @ optional op; may set condition codes 3636 mvn r1, r1 @ r0/r1<- op, r2-r3 changed 3637 GET_INST_OPCODE(ip) @ extract opcode from rINST 3638 stmia r9, {r0-r1} @ vAA<- r0/r1 3639 GOTO_OPCODE(ip) @ jump to next instruction 3640 /* 12-13 instructions */ 3641 3642 3643 3644/* ------------------------------ */ 3645 .balign 64 3646.L_OP_NEG_FLOAT: /* 0x7f */ 3647/* File: armv5te/OP_NEG_FLOAT.S */ 3648/* File: armv5te/unop.S */ 3649 /* 3650 * Generic 32-bit unary operation. Provide an "instr" line that 3651 * specifies an instruction that performs "result = op r0". 3652 * This could be an ARM instruction or a function call. 3653 * 3654 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3655 * int-to-byte, int-to-char, int-to-short 3656 */ 3657 /* unop vA, vB */ 3658 mov r3, rINST, lsr #12 @ r3<- B 3659 mov r9, rINST, lsr #8 @ r9<- A+ 3660 GET_VREG(r0, r3) @ r0<- vB 3661 and r9, r9, #15 3662 @ optional op; may set condition codes 3663 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3664 add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed 3665 GET_INST_OPCODE(ip) @ extract opcode from rINST 3666 SET_VREG(r0, r9) @ vAA<- r0 3667 GOTO_OPCODE(ip) @ jump to next instruction 3668 /* 9-10 instructions */ 3669 3670 3671/* ------------------------------ */ 3672 .balign 64 3673.L_OP_NEG_DOUBLE: /* 0x80 */ 3674/* File: armv5te/OP_NEG_DOUBLE.S */ 3675/* File: armv5te/unopWide.S */ 3676 /* 3677 * Generic 64-bit unary operation. Provide an "instr" line that 3678 * specifies an instruction that performs "result = op r0/r1". 3679 * This could be an ARM instruction or a function call. 3680 * 3681 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3682 */ 3683 /* unop vA, vB */ 3684 mov r9, rINST, lsr #8 @ r9<- A+ 3685 mov r3, rINST, lsr #12 @ r3<- B 3686 and r9, r9, #15 3687 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3688 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3689 ldmia r3, {r0-r1} @ r0/r1<- vAA 3690 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3691 @ optional op; may set condition codes 3692 add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed 3693 GET_INST_OPCODE(ip) @ extract opcode from rINST 3694 stmia r9, {r0-r1} @ vAA<- r0/r1 3695 GOTO_OPCODE(ip) @ jump to next instruction 3696 /* 12-13 instructions */ 3697 3698 3699 3700/* ------------------------------ */ 3701 .balign 64 3702.L_OP_INT_TO_LONG: /* 0x81 */ 3703/* File: armv5te/OP_INT_TO_LONG.S */ 3704/* File: armv5te/unopWider.S */ 3705 /* 3706 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3707 * that specifies an instruction that performs "result = op r0", where 3708 * "result" is a 64-bit quantity in r0/r1. 3709 * 3710 * For: int-to-long, int-to-double, float-to-long, float-to-double 3711 */ 3712 /* unop vA, vB */ 3713 mov r9, rINST, lsr #8 @ r9<- A+ 3714 mov r3, rINST, lsr #12 @ r3<- B 3715 and r9, r9, #15 3716 GET_VREG(r0, r3) @ r0<- vB 3717 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3718 @ optional op; may set condition codes 3719 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3720 mov r1, r0, asr #31 @ r0<- op, r0-r3 changed 3721 GET_INST_OPCODE(ip) @ extract opcode from rINST 3722 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3723 GOTO_OPCODE(ip) @ jump to next instruction 3724 /* 10-11 instructions */ 3725 3726 3727/* ------------------------------ */ 3728 .balign 64 3729.L_OP_INT_TO_FLOAT: /* 0x82 */ 3730/* File: arm-vfp/OP_INT_TO_FLOAT.S */ 3731/* File: arm-vfp/funop.S */ 3732 /* 3733 * Generic 32-bit unary floating-point operation. Provide an "instr" 3734 * line that specifies an instruction that performs "s1 = op s0". 3735 * 3736 * for: int-to-float, float-to-int 3737 */ 3738 /* unop vA, vB */ 3739 mov r3, rINST, lsr #12 @ r3<- B 3740 mov r9, rINST, lsr #8 @ r9<- A+ 3741 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3742 flds s0, [r3] @ s0<- vB 3743 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3744 and r9, r9, #15 @ r9<- A 3745 fsitos s1, s0 @ s1<- op 3746 GET_INST_OPCODE(ip) @ extract opcode from rINST 3747 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3748 fsts s1, [r9] @ vA<- s1 3749 GOTO_OPCODE(ip) @ jump to next instruction 3750 3751 3752/* ------------------------------ */ 3753 .balign 64 3754.L_OP_INT_TO_DOUBLE: /* 0x83 */ 3755/* File: arm-vfp/OP_INT_TO_DOUBLE.S */ 3756/* File: arm-vfp/funopWider.S */ 3757 /* 3758 * Generic 32bit-to-64bit floating point unary operation. Provide an 3759 * "instr" line that specifies an instruction that performs "d0 = op s0". 3760 * 3761 * For: int-to-double, float-to-double 3762 */ 3763 /* unop vA, vB */ 3764 mov r3, rINST, lsr #12 @ r3<- B 3765 mov r9, rINST, lsr #8 @ r9<- A+ 3766 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3767 flds s0, [r3] @ s0<- vB 3768 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3769 and r9, r9, #15 @ r9<- A 3770 fsitod d0, s0 @ d0<- op 3771 GET_INST_OPCODE(ip) @ extract opcode from rINST 3772 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3773 fstd d0, [r9] @ vA<- d0 3774 GOTO_OPCODE(ip) @ jump to next instruction 3775 3776 3777/* ------------------------------ */ 3778 .balign 64 3779.L_OP_LONG_TO_INT: /* 0x84 */ 3780/* File: armv5te/OP_LONG_TO_INT.S */ 3781/* we ignore the high word, making this equivalent to a 32-bit reg move */ 3782/* File: armv5te/OP_MOVE.S */ 3783 /* for move, move-object, long-to-int */ 3784 /* op vA, vB */ 3785 mov r1, rINST, lsr #12 @ r1<- B from 15:12 3786 mov r0, rINST, lsr #8 @ r0<- A from 11:8 3787 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3788 GET_VREG(r2, r1) @ r2<- fp[B] 3789 and r0, r0, #15 3790 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 3791 SET_VREG(r2, r0) @ fp[A]<- r2 3792 GOTO_OPCODE(ip) @ execute next instruction 3793 3794 3795 3796/* ------------------------------ */ 3797 .balign 64 3798.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3799/* File: armv5te/OP_LONG_TO_FLOAT.S */ 3800/* File: armv5te/unopNarrower.S */ 3801 /* 3802 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3803 * that specifies an instruction that performs "result = op r0/r1", where 3804 * "result" is a 32-bit quantity in r0. 3805 * 3806 * For: long-to-float, double-to-int, double-to-float 3807 * 3808 * (This would work for long-to-int, but that instruction is actually 3809 * an exact match for OP_MOVE.) 3810 */ 3811 /* unop vA, vB */ 3812 mov r3, rINST, lsr #12 @ r3<- B 3813 mov r9, rINST, lsr #8 @ r9<- A+ 3814 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3815 and r9, r9, #15 3816 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3817 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3818 @ optional op; may set condition codes 3819 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3820 GET_INST_OPCODE(ip) @ extract opcode from rINST 3821 SET_VREG(r0, r9) @ vA<- r0 3822 GOTO_OPCODE(ip) @ jump to next instruction 3823 /* 10-11 instructions */ 3824 3825 3826/* ------------------------------ */ 3827 .balign 64 3828.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3829/* File: armv5te/OP_LONG_TO_DOUBLE.S */ 3830/* File: armv5te/unopWide.S */ 3831 /* 3832 * Generic 64-bit unary operation. Provide an "instr" line that 3833 * specifies an instruction that performs "result = op r0/r1". 3834 * This could be an ARM instruction or a function call. 3835 * 3836 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3837 */ 3838 /* unop vA, vB */ 3839 mov r9, rINST, lsr #8 @ r9<- A+ 3840 mov r3, rINST, lsr #12 @ r3<- B 3841 and r9, r9, #15 3842 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3843 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3844 ldmia r3, {r0-r1} @ r0/r1<- vAA 3845 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3846 @ optional op; may set condition codes 3847 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3848 GET_INST_OPCODE(ip) @ extract opcode from rINST 3849 stmia r9, {r0-r1} @ vAA<- r0/r1 3850 GOTO_OPCODE(ip) @ jump to next instruction 3851 /* 12-13 instructions */ 3852 3853 3854 3855/* ------------------------------ */ 3856 .balign 64 3857.L_OP_FLOAT_TO_INT: /* 0x87 */ 3858/* File: arm-vfp/OP_FLOAT_TO_INT.S */ 3859/* File: arm-vfp/funop.S */ 3860 /* 3861 * Generic 32-bit unary floating-point operation. Provide an "instr" 3862 * line that specifies an instruction that performs "s1 = op s0". 3863 * 3864 * for: int-to-float, float-to-int 3865 */ 3866 /* unop vA, vB */ 3867 mov r3, rINST, lsr #12 @ r3<- B 3868 mov r9, rINST, lsr #8 @ r9<- A+ 3869 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3870 flds s0, [r3] @ s0<- vB 3871 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3872 and r9, r9, #15 @ r9<- A 3873 ftosizs s1, s0 @ s1<- op 3874 GET_INST_OPCODE(ip) @ extract opcode from rINST 3875 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3876 fsts s1, [r9] @ vA<- s1 3877 GOTO_OPCODE(ip) @ jump to next instruction 3878 3879 3880/* ------------------------------ */ 3881 .balign 64 3882.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3883/* File: armv5te/OP_FLOAT_TO_LONG.S */ 3884@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3885/* File: armv5te/unopWider.S */ 3886 /* 3887 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3888 * that specifies an instruction that performs "result = op r0", where 3889 * "result" is a 64-bit quantity in r0/r1. 3890 * 3891 * For: int-to-long, int-to-double, float-to-long, float-to-double 3892 */ 3893 /* unop vA, vB */ 3894 mov r9, rINST, lsr #8 @ r9<- A+ 3895 mov r3, rINST, lsr #12 @ r3<- B 3896 and r9, r9, #15 3897 GET_VREG(r0, r3) @ r0<- vB 3898 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3899 @ optional op; may set condition codes 3900 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3901 bl f2l_doconv @ r0<- op, r0-r3 changed 3902 GET_INST_OPCODE(ip) @ extract opcode from rINST 3903 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3904 GOTO_OPCODE(ip) @ jump to next instruction 3905 /* 10-11 instructions */ 3906 3907 3908 3909/* ------------------------------ */ 3910 .balign 64 3911.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3912/* File: arm-vfp/OP_FLOAT_TO_DOUBLE.S */ 3913/* File: arm-vfp/funopWider.S */ 3914 /* 3915 * Generic 32bit-to-64bit floating point unary operation. Provide an 3916 * "instr" line that specifies an instruction that performs "d0 = op s0". 3917 * 3918 * For: int-to-double, float-to-double 3919 */ 3920 /* unop vA, vB */ 3921 mov r3, rINST, lsr #12 @ r3<- B 3922 mov r9, rINST, lsr #8 @ r9<- A+ 3923 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3924 flds s0, [r3] @ s0<- vB 3925 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3926 and r9, r9, #15 @ r9<- A 3927 fcvtds d0, s0 @ d0<- op 3928 GET_INST_OPCODE(ip) @ extract opcode from rINST 3929 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3930 fstd d0, [r9] @ vA<- d0 3931 GOTO_OPCODE(ip) @ jump to next instruction 3932 3933 3934/* ------------------------------ */ 3935 .balign 64 3936.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3937/* File: arm-vfp/OP_DOUBLE_TO_INT.S */ 3938/* File: arm-vfp/funopNarrower.S */ 3939 /* 3940 * Generic 64bit-to-32bit unary floating point operation. Provide an 3941 * "instr" line that specifies an instruction that performs "s0 = op d0". 3942 * 3943 * For: double-to-int, double-to-float 3944 */ 3945 /* unop vA, vB */ 3946 mov r3, rINST, lsr #12 @ r3<- B 3947 mov r9, rINST, lsr #8 @ r9<- A+ 3948 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3949 fldd d0, [r3] @ d0<- vB 3950 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3951 and r9, r9, #15 @ r9<- A 3952 ftosizd s0, d0 @ s0<- op 3953 GET_INST_OPCODE(ip) @ extract opcode from rINST 3954 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3955 fsts s0, [r9] @ vA<- s0 3956 GOTO_OPCODE(ip) @ jump to next instruction 3957 3958 3959/* ------------------------------ */ 3960 .balign 64 3961.L_OP_DOUBLE_TO_LONG: /* 0x8b */ 3962/* File: armv5te/OP_DOUBLE_TO_LONG.S */ 3963@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"} 3964/* File: armv5te/unopWide.S */ 3965 /* 3966 * Generic 64-bit unary operation. Provide an "instr" line that 3967 * specifies an instruction that performs "result = op r0/r1". 3968 * This could be an ARM instruction or a function call. 3969 * 3970 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3971 */ 3972 /* unop vA, vB */ 3973 mov r9, rINST, lsr #8 @ r9<- A+ 3974 mov r3, rINST, lsr #12 @ r3<- B 3975 and r9, r9, #15 3976 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3977 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3978 ldmia r3, {r0-r1} @ r0/r1<- vAA 3979 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3980 @ optional op; may set condition codes 3981 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 3982 GET_INST_OPCODE(ip) @ extract opcode from rINST 3983 stmia r9, {r0-r1} @ vAA<- r0/r1 3984 GOTO_OPCODE(ip) @ jump to next instruction 3985 /* 12-13 instructions */ 3986 3987 3988 3989 3990/* ------------------------------ */ 3991 .balign 64 3992.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 3993/* File: arm-vfp/OP_DOUBLE_TO_FLOAT.S */ 3994/* File: arm-vfp/funopNarrower.S */ 3995 /* 3996 * Generic 64bit-to-32bit unary floating point operation. Provide an 3997 * "instr" line that specifies an instruction that performs "s0 = op d0". 3998 * 3999 * For: double-to-int, double-to-float 4000 */ 4001 /* unop vA, vB */ 4002 mov r3, rINST, lsr #12 @ r3<- B 4003 mov r9, rINST, lsr #8 @ r9<- A+ 4004 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 4005 fldd d0, [r3] @ d0<- vB 4006 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4007 and r9, r9, #15 @ r9<- A 4008 fcvtsd s0, d0 @ s0<- op 4009 GET_INST_OPCODE(ip) @ extract opcode from rINST 4010 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 4011 fsts s0, [r9] @ vA<- s0 4012 GOTO_OPCODE(ip) @ jump to next instruction 4013 4014 4015/* ------------------------------ */ 4016 .balign 64 4017.L_OP_INT_TO_BYTE: /* 0x8d */ 4018/* File: armv5te/OP_INT_TO_BYTE.S */ 4019/* File: armv5te/unop.S */ 4020 /* 4021 * Generic 32-bit unary operation. Provide an "instr" line that 4022 * specifies an instruction that performs "result = op r0". 4023 * This could be an ARM instruction or a function call. 4024 * 4025 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4026 * int-to-byte, int-to-char, int-to-short 4027 */ 4028 /* unop vA, vB */ 4029 mov r3, rINST, lsr #12 @ r3<- B 4030 mov r9, rINST, lsr #8 @ r9<- A+ 4031 GET_VREG(r0, r3) @ r0<- vB 4032 and r9, r9, #15 4033 mov r0, r0, asl #24 @ optional op; may set condition codes 4034 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4035 mov r0, r0, asr #24 @ r0<- op, r0-r3 changed 4036 GET_INST_OPCODE(ip) @ extract opcode from rINST 4037 SET_VREG(r0, r9) @ vAA<- r0 4038 GOTO_OPCODE(ip) @ jump to next instruction 4039 /* 9-10 instructions */ 4040 4041 4042/* ------------------------------ */ 4043 .balign 64 4044.L_OP_INT_TO_CHAR: /* 0x8e */ 4045/* File: armv5te/OP_INT_TO_CHAR.S */ 4046/* File: armv5te/unop.S */ 4047 /* 4048 * Generic 32-bit unary operation. Provide an "instr" line that 4049 * specifies an instruction that performs "result = op r0". 4050 * This could be an ARM instruction or a function call. 4051 * 4052 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4053 * int-to-byte, int-to-char, int-to-short 4054 */ 4055 /* unop vA, vB */ 4056 mov r3, rINST, lsr #12 @ r3<- B 4057 mov r9, rINST, lsr #8 @ r9<- A+ 4058 GET_VREG(r0, r3) @ r0<- vB 4059 and r9, r9, #15 4060 mov r0, r0, asl #16 @ optional op; may set condition codes 4061 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4062 mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed 4063 GET_INST_OPCODE(ip) @ extract opcode from rINST 4064 SET_VREG(r0, r9) @ vAA<- r0 4065 GOTO_OPCODE(ip) @ jump to next instruction 4066 /* 9-10 instructions */ 4067 4068 4069/* ------------------------------ */ 4070 .balign 64 4071.L_OP_INT_TO_SHORT: /* 0x8f */ 4072/* File: armv5te/OP_INT_TO_SHORT.S */ 4073/* File: armv5te/unop.S */ 4074 /* 4075 * Generic 32-bit unary operation. Provide an "instr" line that 4076 * specifies an instruction that performs "result = op r0". 4077 * This could be an ARM instruction or a function call. 4078 * 4079 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4080 * int-to-byte, int-to-char, int-to-short 4081 */ 4082 /* unop vA, vB */ 4083 mov r3, rINST, lsr #12 @ r3<- B 4084 mov r9, rINST, lsr #8 @ r9<- A+ 4085 GET_VREG(r0, r3) @ r0<- vB 4086 and r9, r9, #15 4087 mov r0, r0, asl #16 @ optional op; may set condition codes 4088 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4089 mov r0, r0, asr #16 @ r0<- op, r0-r3 changed 4090 GET_INST_OPCODE(ip) @ extract opcode from rINST 4091 SET_VREG(r0, r9) @ vAA<- r0 4092 GOTO_OPCODE(ip) @ jump to next instruction 4093 /* 9-10 instructions */ 4094 4095 4096/* ------------------------------ */ 4097 .balign 64 4098.L_OP_ADD_INT: /* 0x90 */ 4099/* File: armv5te/OP_ADD_INT.S */ 4100/* File: armv5te/binop.S */ 4101 /* 4102 * Generic 32-bit binary operation. Provide an "instr" line that 4103 * specifies an instruction that performs "result = r0 op r1". 4104 * This could be an ARM instruction or a function call. (If the result 4105 * comes back in a register other than r0, you can override "result".) 4106 * 4107 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4108 * vCC (r1). Useful for integer division and modulus. Note that we 4109 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4110 * handles it correctly. 4111 * 4112 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4113 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4114 * mul-float, div-float, rem-float 4115 */ 4116 /* binop vAA, vBB, vCC */ 4117 FETCH(r0, 1) @ r0<- CCBB 4118 mov r9, rINST, lsr #8 @ r9<- AA 4119 mov r3, r0, lsr #8 @ r3<- CC 4120 and r2, r0, #255 @ r2<- BB 4121 GET_VREG(r1, r3) @ r1<- vCC 4122 GET_VREG(r0, r2) @ r0<- vBB 4123 .if 0 4124 cmp r1, #0 @ is second operand zero? 4125 beq common_errDivideByZero 4126 .endif 4127 4128 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4129 @ optional op; may set condition codes 4130 add r0, r0, r1 @ r0<- op, r0-r3 changed 4131 GET_INST_OPCODE(ip) @ extract opcode from rINST 4132 SET_VREG(r0, r9) @ vAA<- r0 4133 GOTO_OPCODE(ip) @ jump to next instruction 4134 /* 11-14 instructions */ 4135 4136 4137 4138/* ------------------------------ */ 4139 .balign 64 4140.L_OP_SUB_INT: /* 0x91 */ 4141/* File: armv5te/OP_SUB_INT.S */ 4142/* File: armv5te/binop.S */ 4143 /* 4144 * Generic 32-bit binary operation. Provide an "instr" line that 4145 * specifies an instruction that performs "result = r0 op r1". 4146 * This could be an ARM instruction or a function call. (If the result 4147 * comes back in a register other than r0, you can override "result".) 4148 * 4149 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4150 * vCC (r1). Useful for integer division and modulus. Note that we 4151 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4152 * handles it correctly. 4153 * 4154 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4155 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4156 * mul-float, div-float, rem-float 4157 */ 4158 /* binop vAA, vBB, vCC */ 4159 FETCH(r0, 1) @ r0<- CCBB 4160 mov r9, rINST, lsr #8 @ r9<- AA 4161 mov r3, r0, lsr #8 @ r3<- CC 4162 and r2, r0, #255 @ r2<- BB 4163 GET_VREG(r1, r3) @ r1<- vCC 4164 GET_VREG(r0, r2) @ r0<- vBB 4165 .if 0 4166 cmp r1, #0 @ is second operand zero? 4167 beq common_errDivideByZero 4168 .endif 4169 4170 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4171 @ optional op; may set condition codes 4172 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4173 GET_INST_OPCODE(ip) @ extract opcode from rINST 4174 SET_VREG(r0, r9) @ vAA<- r0 4175 GOTO_OPCODE(ip) @ jump to next instruction 4176 /* 11-14 instructions */ 4177 4178 4179 4180/* ------------------------------ */ 4181 .balign 64 4182.L_OP_MUL_INT: /* 0x92 */ 4183/* File: armv5te/OP_MUL_INT.S */ 4184/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4185/* File: armv5te/binop.S */ 4186 /* 4187 * Generic 32-bit binary operation. Provide an "instr" line that 4188 * specifies an instruction that performs "result = r0 op r1". 4189 * This could be an ARM instruction or a function call. (If the result 4190 * comes back in a register other than r0, you can override "result".) 4191 * 4192 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4193 * vCC (r1). Useful for integer division and modulus. Note that we 4194 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4195 * handles it correctly. 4196 * 4197 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4198 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4199 * mul-float, div-float, rem-float 4200 */ 4201 /* binop vAA, vBB, vCC */ 4202 FETCH(r0, 1) @ r0<- CCBB 4203 mov r9, rINST, lsr #8 @ r9<- AA 4204 mov r3, r0, lsr #8 @ r3<- CC 4205 and r2, r0, #255 @ r2<- BB 4206 GET_VREG(r1, r3) @ r1<- vCC 4207 GET_VREG(r0, r2) @ r0<- vBB 4208 .if 0 4209 cmp r1, #0 @ is second operand zero? 4210 beq common_errDivideByZero 4211 .endif 4212 4213 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4214 @ optional op; may set condition codes 4215 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4216 GET_INST_OPCODE(ip) @ extract opcode from rINST 4217 SET_VREG(r0, r9) @ vAA<- r0 4218 GOTO_OPCODE(ip) @ jump to next instruction 4219 /* 11-14 instructions */ 4220 4221 4222 4223/* ------------------------------ */ 4224 .balign 64 4225.L_OP_DIV_INT: /* 0x93 */ 4226/* File: armv5te/OP_DIV_INT.S */ 4227/* File: armv5te/binop.S */ 4228 /* 4229 * Generic 32-bit binary operation. Provide an "instr" line that 4230 * specifies an instruction that performs "result = r0 op r1". 4231 * This could be an ARM instruction or a function call. (If the result 4232 * comes back in a register other than r0, you can override "result".) 4233 * 4234 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4235 * vCC (r1). Useful for integer division and modulus. Note that we 4236 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4237 * handles it correctly. 4238 * 4239 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4240 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4241 * mul-float, div-float, rem-float 4242 */ 4243 /* binop vAA, vBB, vCC */ 4244 FETCH(r0, 1) @ r0<- CCBB 4245 mov r9, rINST, lsr #8 @ r9<- AA 4246 mov r3, r0, lsr #8 @ r3<- CC 4247 and r2, r0, #255 @ r2<- BB 4248 GET_VREG(r1, r3) @ r1<- vCC 4249 GET_VREG(r0, r2) @ r0<- vBB 4250 .if 1 4251 cmp r1, #0 @ is second operand zero? 4252 beq common_errDivideByZero 4253 .endif 4254 4255 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4256 @ optional op; may set condition codes 4257 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4258 GET_INST_OPCODE(ip) @ extract opcode from rINST 4259 SET_VREG(r0, r9) @ vAA<- r0 4260 GOTO_OPCODE(ip) @ jump to next instruction 4261 /* 11-14 instructions */ 4262 4263 4264 4265/* ------------------------------ */ 4266 .balign 64 4267.L_OP_REM_INT: /* 0x94 */ 4268/* File: armv5te/OP_REM_INT.S */ 4269/* idivmod returns quotient in r0 and remainder in r1 */ 4270/* File: armv5te/binop.S */ 4271 /* 4272 * Generic 32-bit binary operation. Provide an "instr" line that 4273 * specifies an instruction that performs "result = r0 op r1". 4274 * This could be an ARM instruction or a function call. (If the result 4275 * comes back in a register other than r0, you can override "result".) 4276 * 4277 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4278 * vCC (r1). Useful for integer division and modulus. Note that we 4279 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4280 * handles it correctly. 4281 * 4282 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4283 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4284 * mul-float, div-float, rem-float 4285 */ 4286 /* binop vAA, vBB, vCC */ 4287 FETCH(r0, 1) @ r0<- CCBB 4288 mov r9, rINST, lsr #8 @ r9<- AA 4289 mov r3, r0, lsr #8 @ r3<- CC 4290 and r2, r0, #255 @ r2<- BB 4291 GET_VREG(r1, r3) @ r1<- vCC 4292 GET_VREG(r0, r2) @ r0<- vBB 4293 .if 1 4294 cmp r1, #0 @ is second operand zero? 4295 beq common_errDivideByZero 4296 .endif 4297 4298 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4299 @ optional op; may set condition codes 4300 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4301 GET_INST_OPCODE(ip) @ extract opcode from rINST 4302 SET_VREG(r1, r9) @ vAA<- r1 4303 GOTO_OPCODE(ip) @ jump to next instruction 4304 /* 11-14 instructions */ 4305 4306 4307 4308/* ------------------------------ */ 4309 .balign 64 4310.L_OP_AND_INT: /* 0x95 */ 4311/* File: armv5te/OP_AND_INT.S */ 4312/* File: armv5te/binop.S */ 4313 /* 4314 * Generic 32-bit binary operation. Provide an "instr" line that 4315 * specifies an instruction that performs "result = r0 op r1". 4316 * This could be an ARM instruction or a function call. (If the result 4317 * comes back in a register other than r0, you can override "result".) 4318 * 4319 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4320 * vCC (r1). Useful for integer division and modulus. Note that we 4321 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4322 * handles it correctly. 4323 * 4324 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4325 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4326 * mul-float, div-float, rem-float 4327 */ 4328 /* binop vAA, vBB, vCC */ 4329 FETCH(r0, 1) @ r0<- CCBB 4330 mov r9, rINST, lsr #8 @ r9<- AA 4331 mov r3, r0, lsr #8 @ r3<- CC 4332 and r2, r0, #255 @ r2<- BB 4333 GET_VREG(r1, r3) @ r1<- vCC 4334 GET_VREG(r0, r2) @ r0<- vBB 4335 .if 0 4336 cmp r1, #0 @ is second operand zero? 4337 beq common_errDivideByZero 4338 .endif 4339 4340 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4341 @ optional op; may set condition codes 4342 and r0, r0, r1 @ r0<- op, r0-r3 changed 4343 GET_INST_OPCODE(ip) @ extract opcode from rINST 4344 SET_VREG(r0, r9) @ vAA<- r0 4345 GOTO_OPCODE(ip) @ jump to next instruction 4346 /* 11-14 instructions */ 4347 4348 4349 4350/* ------------------------------ */ 4351 .balign 64 4352.L_OP_OR_INT: /* 0x96 */ 4353/* File: armv5te/OP_OR_INT.S */ 4354/* File: armv5te/binop.S */ 4355 /* 4356 * Generic 32-bit binary operation. Provide an "instr" line that 4357 * specifies an instruction that performs "result = r0 op r1". 4358 * This could be an ARM instruction or a function call. (If the result 4359 * comes back in a register other than r0, you can override "result".) 4360 * 4361 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4362 * vCC (r1). Useful for integer division and modulus. Note that we 4363 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4364 * handles it correctly. 4365 * 4366 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4367 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4368 * mul-float, div-float, rem-float 4369 */ 4370 /* binop vAA, vBB, vCC */ 4371 FETCH(r0, 1) @ r0<- CCBB 4372 mov r9, rINST, lsr #8 @ r9<- AA 4373 mov r3, r0, lsr #8 @ r3<- CC 4374 and r2, r0, #255 @ r2<- BB 4375 GET_VREG(r1, r3) @ r1<- vCC 4376 GET_VREG(r0, r2) @ r0<- vBB 4377 .if 0 4378 cmp r1, #0 @ is second operand zero? 4379 beq common_errDivideByZero 4380 .endif 4381 4382 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4383 @ optional op; may set condition codes 4384 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4385 GET_INST_OPCODE(ip) @ extract opcode from rINST 4386 SET_VREG(r0, r9) @ vAA<- r0 4387 GOTO_OPCODE(ip) @ jump to next instruction 4388 /* 11-14 instructions */ 4389 4390 4391 4392/* ------------------------------ */ 4393 .balign 64 4394.L_OP_XOR_INT: /* 0x97 */ 4395/* File: armv5te/OP_XOR_INT.S */ 4396/* File: armv5te/binop.S */ 4397 /* 4398 * Generic 32-bit binary operation. Provide an "instr" line that 4399 * specifies an instruction that performs "result = r0 op r1". 4400 * This could be an ARM instruction or a function call. (If the result 4401 * comes back in a register other than r0, you can override "result".) 4402 * 4403 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4404 * vCC (r1). Useful for integer division and modulus. Note that we 4405 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4406 * handles it correctly. 4407 * 4408 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4409 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4410 * mul-float, div-float, rem-float 4411 */ 4412 /* binop vAA, vBB, vCC */ 4413 FETCH(r0, 1) @ r0<- CCBB 4414 mov r9, rINST, lsr #8 @ r9<- AA 4415 mov r3, r0, lsr #8 @ r3<- CC 4416 and r2, r0, #255 @ r2<- BB 4417 GET_VREG(r1, r3) @ r1<- vCC 4418 GET_VREG(r0, r2) @ r0<- vBB 4419 .if 0 4420 cmp r1, #0 @ is second operand zero? 4421 beq common_errDivideByZero 4422 .endif 4423 4424 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4425 @ optional op; may set condition codes 4426 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4427 GET_INST_OPCODE(ip) @ extract opcode from rINST 4428 SET_VREG(r0, r9) @ vAA<- r0 4429 GOTO_OPCODE(ip) @ jump to next instruction 4430 /* 11-14 instructions */ 4431 4432 4433 4434/* ------------------------------ */ 4435 .balign 64 4436.L_OP_SHL_INT: /* 0x98 */ 4437/* File: armv5te/OP_SHL_INT.S */ 4438/* File: armv5te/binop.S */ 4439 /* 4440 * Generic 32-bit binary operation. Provide an "instr" line that 4441 * specifies an instruction that performs "result = r0 op r1". 4442 * This could be an ARM instruction or a function call. (If the result 4443 * comes back in a register other than r0, you can override "result".) 4444 * 4445 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4446 * vCC (r1). Useful for integer division and modulus. Note that we 4447 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4448 * handles it correctly. 4449 * 4450 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4451 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4452 * mul-float, div-float, rem-float 4453 */ 4454 /* binop vAA, vBB, vCC */ 4455 FETCH(r0, 1) @ r0<- CCBB 4456 mov r9, rINST, lsr #8 @ r9<- AA 4457 mov r3, r0, lsr #8 @ r3<- CC 4458 and r2, r0, #255 @ r2<- BB 4459 GET_VREG(r1, r3) @ r1<- vCC 4460 GET_VREG(r0, r2) @ r0<- vBB 4461 .if 0 4462 cmp r1, #0 @ is second operand zero? 4463 beq common_errDivideByZero 4464 .endif 4465 4466 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4467 and r1, r1, #31 @ optional op; may set condition codes 4468 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 4469 GET_INST_OPCODE(ip) @ extract opcode from rINST 4470 SET_VREG(r0, r9) @ vAA<- r0 4471 GOTO_OPCODE(ip) @ jump to next instruction 4472 /* 11-14 instructions */ 4473 4474 4475 4476/* ------------------------------ */ 4477 .balign 64 4478.L_OP_SHR_INT: /* 0x99 */ 4479/* File: armv5te/OP_SHR_INT.S */ 4480/* File: armv5te/binop.S */ 4481 /* 4482 * Generic 32-bit binary operation. Provide an "instr" line that 4483 * specifies an instruction that performs "result = r0 op r1". 4484 * This could be an ARM instruction or a function call. (If the result 4485 * comes back in a register other than r0, you can override "result".) 4486 * 4487 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4488 * vCC (r1). Useful for integer division and modulus. Note that we 4489 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4490 * handles it correctly. 4491 * 4492 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4493 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4494 * mul-float, div-float, rem-float 4495 */ 4496 /* binop vAA, vBB, vCC */ 4497 FETCH(r0, 1) @ r0<- CCBB 4498 mov r9, rINST, lsr #8 @ r9<- AA 4499 mov r3, r0, lsr #8 @ r3<- CC 4500 and r2, r0, #255 @ r2<- BB 4501 GET_VREG(r1, r3) @ r1<- vCC 4502 GET_VREG(r0, r2) @ r0<- vBB 4503 .if 0 4504 cmp r1, #0 @ is second operand zero? 4505 beq common_errDivideByZero 4506 .endif 4507 4508 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4509 and r1, r1, #31 @ optional op; may set condition codes 4510 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 4511 GET_INST_OPCODE(ip) @ extract opcode from rINST 4512 SET_VREG(r0, r9) @ vAA<- r0 4513 GOTO_OPCODE(ip) @ jump to next instruction 4514 /* 11-14 instructions */ 4515 4516 4517 4518/* ------------------------------ */ 4519 .balign 64 4520.L_OP_USHR_INT: /* 0x9a */ 4521/* File: armv5te/OP_USHR_INT.S */ 4522/* File: armv5te/binop.S */ 4523 /* 4524 * Generic 32-bit binary operation. Provide an "instr" line that 4525 * specifies an instruction that performs "result = r0 op r1". 4526 * This could be an ARM instruction or a function call. (If the result 4527 * comes back in a register other than r0, you can override "result".) 4528 * 4529 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4530 * vCC (r1). Useful for integer division and modulus. Note that we 4531 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4532 * handles it correctly. 4533 * 4534 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4535 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4536 * mul-float, div-float, rem-float 4537 */ 4538 /* binop vAA, vBB, vCC */ 4539 FETCH(r0, 1) @ r0<- CCBB 4540 mov r9, rINST, lsr #8 @ r9<- AA 4541 mov r3, r0, lsr #8 @ r3<- CC 4542 and r2, r0, #255 @ r2<- BB 4543 GET_VREG(r1, r3) @ r1<- vCC 4544 GET_VREG(r0, r2) @ r0<- vBB 4545 .if 0 4546 cmp r1, #0 @ is second operand zero? 4547 beq common_errDivideByZero 4548 .endif 4549 4550 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4551 and r1, r1, #31 @ optional op; may set condition codes 4552 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 4553 GET_INST_OPCODE(ip) @ extract opcode from rINST 4554 SET_VREG(r0, r9) @ vAA<- r0 4555 GOTO_OPCODE(ip) @ jump to next instruction 4556 /* 11-14 instructions */ 4557 4558 4559 4560/* ------------------------------ */ 4561 .balign 64 4562.L_OP_ADD_LONG: /* 0x9b */ 4563/* File: armv5te/OP_ADD_LONG.S */ 4564/* File: armv5te/binopWide.S */ 4565 /* 4566 * Generic 64-bit binary operation. Provide an "instr" line that 4567 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4568 * This could be an ARM instruction or a function call. (If the result 4569 * comes back in a register other than r0, you can override "result".) 4570 * 4571 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4572 * vCC (r1). Useful for integer division and modulus. 4573 * 4574 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4575 * xor-long, add-double, sub-double, mul-double, div-double, 4576 * rem-double 4577 * 4578 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4579 */ 4580 /* binop vAA, vBB, vCC */ 4581 FETCH(r0, 1) @ r0<- CCBB 4582 mov r9, rINST, lsr #8 @ r9<- AA 4583 and r2, r0, #255 @ r2<- BB 4584 mov r3, r0, lsr #8 @ r3<- CC 4585 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4586 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4587 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4588 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4589 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4590 .if 0 4591 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4592 beq common_errDivideByZero 4593 .endif 4594 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4595 4596 adds r0, r0, r2 @ optional op; may set condition codes 4597 adc r1, r1, r3 @ result<- op, r0-r3 changed 4598 GET_INST_OPCODE(ip) @ extract opcode from rINST 4599 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4600 GOTO_OPCODE(ip) @ jump to next instruction 4601 /* 14-17 instructions */ 4602 4603 4604 4605/* ------------------------------ */ 4606 .balign 64 4607.L_OP_SUB_LONG: /* 0x9c */ 4608/* File: armv5te/OP_SUB_LONG.S */ 4609/* File: armv5te/binopWide.S */ 4610 /* 4611 * Generic 64-bit binary operation. Provide an "instr" line that 4612 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4613 * This could be an ARM instruction or a function call. (If the result 4614 * comes back in a register other than r0, you can override "result".) 4615 * 4616 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4617 * vCC (r1). Useful for integer division and modulus. 4618 * 4619 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4620 * xor-long, add-double, sub-double, mul-double, div-double, 4621 * rem-double 4622 * 4623 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4624 */ 4625 /* binop vAA, vBB, vCC */ 4626 FETCH(r0, 1) @ r0<- CCBB 4627 mov r9, rINST, lsr #8 @ r9<- AA 4628 and r2, r0, #255 @ r2<- BB 4629 mov r3, r0, lsr #8 @ r3<- CC 4630 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4631 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4632 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4633 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4634 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4635 .if 0 4636 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4637 beq common_errDivideByZero 4638 .endif 4639 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4640 4641 subs r0, r0, r2 @ optional op; may set condition codes 4642 sbc r1, r1, r3 @ result<- op, r0-r3 changed 4643 GET_INST_OPCODE(ip) @ extract opcode from rINST 4644 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4645 GOTO_OPCODE(ip) @ jump to next instruction 4646 /* 14-17 instructions */ 4647 4648 4649 4650/* ------------------------------ */ 4651 .balign 64 4652.L_OP_MUL_LONG: /* 0x9d */ 4653/* File: armv5te/OP_MUL_LONG.S */ 4654 /* 4655 * Signed 64-bit integer multiply. 4656 * 4657 * Consider WXxYZ (r1r0 x r3r2) with a long multiply: 4658 * WX 4659 * x YZ 4660 * -------- 4661 * ZW ZX 4662 * YW YX 4663 * 4664 * The low word of the result holds ZX, the high word holds 4665 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because 4666 * it doesn't fit in the low 64 bits. 4667 * 4668 * Unlike most ARM math operations, multiply instructions have 4669 * restrictions on using the same register more than once (Rd and Rm 4670 * cannot be the same). 4671 */ 4672 /* mul-long vAA, vBB, vCC */ 4673 FETCH(r0, 1) @ r0<- CCBB 4674 and r2, r0, #255 @ r2<- BB 4675 mov r3, r0, lsr #8 @ r3<- CC 4676 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4677 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4678 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4679 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4680 mul ip, r2, r1 @ ip<- ZxW 4681 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 4682 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 4683 mov r0, rINST, lsr #8 @ r0<- AA 4684 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 4685 add r0, rFP, r0, lsl #2 @ r0<- &fp[AA] 4686 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4687 b .LOP_MUL_LONG_finish 4688 4689/* ------------------------------ */ 4690 .balign 64 4691.L_OP_DIV_LONG: /* 0x9e */ 4692/* File: armv5te/OP_DIV_LONG.S */ 4693/* File: armv5te/binopWide.S */ 4694 /* 4695 * Generic 64-bit binary operation. Provide an "instr" line that 4696 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4697 * This could be an ARM instruction or a function call. (If the result 4698 * comes back in a register other than r0, you can override "result".) 4699 * 4700 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4701 * vCC (r1). Useful for integer division and modulus. 4702 * 4703 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4704 * xor-long, add-double, sub-double, mul-double, div-double, 4705 * rem-double 4706 * 4707 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4708 */ 4709 /* binop vAA, vBB, vCC */ 4710 FETCH(r0, 1) @ r0<- CCBB 4711 mov r9, rINST, lsr #8 @ r9<- AA 4712 and r2, r0, #255 @ r2<- BB 4713 mov r3, r0, lsr #8 @ r3<- CC 4714 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4715 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4716 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4717 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4718 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4719 .if 1 4720 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4721 beq common_errDivideByZero 4722 .endif 4723 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4724 4725 @ optional op; may set condition codes 4726 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4727 GET_INST_OPCODE(ip) @ extract opcode from rINST 4728 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4729 GOTO_OPCODE(ip) @ jump to next instruction 4730 /* 14-17 instructions */ 4731 4732 4733 4734/* ------------------------------ */ 4735 .balign 64 4736.L_OP_REM_LONG: /* 0x9f */ 4737/* File: armv5te/OP_REM_LONG.S */ 4738/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 4739/* File: armv5te/binopWide.S */ 4740 /* 4741 * Generic 64-bit binary operation. Provide an "instr" line that 4742 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4743 * This could be an ARM instruction or a function call. (If the result 4744 * comes back in a register other than r0, you can override "result".) 4745 * 4746 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4747 * vCC (r1). Useful for integer division and modulus. 4748 * 4749 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4750 * xor-long, add-double, sub-double, mul-double, div-double, 4751 * rem-double 4752 * 4753 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4754 */ 4755 /* binop vAA, vBB, vCC */ 4756 FETCH(r0, 1) @ r0<- CCBB 4757 mov r9, rINST, lsr #8 @ r9<- AA 4758 and r2, r0, #255 @ r2<- BB 4759 mov r3, r0, lsr #8 @ r3<- CC 4760 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4761 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4762 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4763 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4764 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4765 .if 1 4766 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4767 beq common_errDivideByZero 4768 .endif 4769 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4770 4771 @ optional op; may set condition codes 4772 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4773 GET_INST_OPCODE(ip) @ extract opcode from rINST 4774 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 4775 GOTO_OPCODE(ip) @ jump to next instruction 4776 /* 14-17 instructions */ 4777 4778 4779 4780/* ------------------------------ */ 4781 .balign 64 4782.L_OP_AND_LONG: /* 0xa0 */ 4783/* File: armv5te/OP_AND_LONG.S */ 4784/* File: armv5te/binopWide.S */ 4785 /* 4786 * Generic 64-bit binary operation. Provide an "instr" line that 4787 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4788 * This could be an ARM instruction or a function call. (If the result 4789 * comes back in a register other than r0, you can override "result".) 4790 * 4791 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4792 * vCC (r1). Useful for integer division and modulus. 4793 * 4794 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4795 * xor-long, add-double, sub-double, mul-double, div-double, 4796 * rem-double 4797 * 4798 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4799 */ 4800 /* binop vAA, vBB, vCC */ 4801 FETCH(r0, 1) @ r0<- CCBB 4802 mov r9, rINST, lsr #8 @ r9<- AA 4803 and r2, r0, #255 @ r2<- BB 4804 mov r3, r0, lsr #8 @ r3<- CC 4805 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4806 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4807 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4808 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4809 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4810 .if 0 4811 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4812 beq common_errDivideByZero 4813 .endif 4814 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4815 4816 and r0, r0, r2 @ optional op; may set condition codes 4817 and r1, r1, r3 @ result<- op, r0-r3 changed 4818 GET_INST_OPCODE(ip) @ extract opcode from rINST 4819 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4820 GOTO_OPCODE(ip) @ jump to next instruction 4821 /* 14-17 instructions */ 4822 4823 4824 4825/* ------------------------------ */ 4826 .balign 64 4827.L_OP_OR_LONG: /* 0xa1 */ 4828/* File: armv5te/OP_OR_LONG.S */ 4829/* File: armv5te/binopWide.S */ 4830 /* 4831 * Generic 64-bit binary operation. Provide an "instr" line that 4832 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4833 * This could be an ARM instruction or a function call. (If the result 4834 * comes back in a register other than r0, you can override "result".) 4835 * 4836 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4837 * vCC (r1). Useful for integer division and modulus. 4838 * 4839 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4840 * xor-long, add-double, sub-double, mul-double, div-double, 4841 * rem-double 4842 * 4843 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4844 */ 4845 /* binop vAA, vBB, vCC */ 4846 FETCH(r0, 1) @ r0<- CCBB 4847 mov r9, rINST, lsr #8 @ r9<- AA 4848 and r2, r0, #255 @ r2<- BB 4849 mov r3, r0, lsr #8 @ r3<- CC 4850 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4851 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4852 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4853 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4854 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4855 .if 0 4856 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4857 beq common_errDivideByZero 4858 .endif 4859 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4860 4861 orr r0, r0, r2 @ optional op; may set condition codes 4862 orr r1, r1, r3 @ result<- op, r0-r3 changed 4863 GET_INST_OPCODE(ip) @ extract opcode from rINST 4864 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4865 GOTO_OPCODE(ip) @ jump to next instruction 4866 /* 14-17 instructions */ 4867 4868 4869 4870/* ------------------------------ */ 4871 .balign 64 4872.L_OP_XOR_LONG: /* 0xa2 */ 4873/* File: armv5te/OP_XOR_LONG.S */ 4874/* File: armv5te/binopWide.S */ 4875 /* 4876 * Generic 64-bit binary operation. Provide an "instr" line that 4877 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4878 * This could be an ARM instruction or a function call. (If the result 4879 * comes back in a register other than r0, you can override "result".) 4880 * 4881 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4882 * vCC (r1). Useful for integer division and modulus. 4883 * 4884 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4885 * xor-long, add-double, sub-double, mul-double, div-double, 4886 * rem-double 4887 * 4888 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4889 */ 4890 /* binop vAA, vBB, vCC */ 4891 FETCH(r0, 1) @ r0<- CCBB 4892 mov r9, rINST, lsr #8 @ r9<- AA 4893 and r2, r0, #255 @ r2<- BB 4894 mov r3, r0, lsr #8 @ r3<- CC 4895 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4896 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4897 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4898 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4899 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4900 .if 0 4901 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4902 beq common_errDivideByZero 4903 .endif 4904 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4905 4906 eor r0, r0, r2 @ optional op; may set condition codes 4907 eor r1, r1, r3 @ result<- op, r0-r3 changed 4908 GET_INST_OPCODE(ip) @ extract opcode from rINST 4909 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4910 GOTO_OPCODE(ip) @ jump to next instruction 4911 /* 14-17 instructions */ 4912 4913 4914 4915/* ------------------------------ */ 4916 .balign 64 4917.L_OP_SHL_LONG: /* 0xa3 */ 4918/* File: armv5te/OP_SHL_LONG.S */ 4919 /* 4920 * Long integer shift. This is different from the generic 32/64-bit 4921 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4922 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4923 * 6 bits of the shift distance. 4924 */ 4925 /* shl-long vAA, vBB, vCC */ 4926 FETCH(r0, 1) @ r0<- CCBB 4927 mov r9, rINST, lsr #8 @ r9<- AA 4928 and r3, r0, #255 @ r3<- BB 4929 mov r0, r0, lsr #8 @ r0<- CC 4930 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 4931 GET_VREG(r2, r0) @ r2<- vCC 4932 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 4933 and r2, r2, #63 @ r2<- r2 & 0x3f 4934 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4935 4936 mov r1, r1, asl r2 @ r1<- r1 << r2 4937 rsb r3, r2, #32 @ r3<- 32 - r2 4938 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 4939 subs ip, r2, #32 @ ip<- r2 - 32 4940 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 4941 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4942 b .LOP_SHL_LONG_finish 4943 4944/* ------------------------------ */ 4945 .balign 64 4946.L_OP_SHR_LONG: /* 0xa4 */ 4947/* File: armv5te/OP_SHR_LONG.S */ 4948 /* 4949 * Long integer shift. This is different from the generic 32/64-bit 4950 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4951 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4952 * 6 bits of the shift distance. 4953 */ 4954 /* shr-long vAA, vBB, vCC */ 4955 FETCH(r0, 1) @ r0<- CCBB 4956 mov r9, rINST, lsr #8 @ r9<- AA 4957 and r3, r0, #255 @ r3<- BB 4958 mov r0, r0, lsr #8 @ r0<- CC 4959 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 4960 GET_VREG(r2, r0) @ r2<- vCC 4961 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 4962 and r2, r2, #63 @ r0<- r0 & 0x3f 4963 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4964 4965 mov r0, r0, lsr r2 @ r0<- r2 >> r2 4966 rsb r3, r2, #32 @ r3<- 32 - r2 4967 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 4968 subs ip, r2, #32 @ ip<- r2 - 32 4969 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 4970 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4971 b .LOP_SHR_LONG_finish 4972 4973/* ------------------------------ */ 4974 .balign 64 4975.L_OP_USHR_LONG: /* 0xa5 */ 4976/* File: armv5te/OP_USHR_LONG.S */ 4977 /* 4978 * Long integer shift. This is different from the generic 32/64-bit 4979 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4980 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4981 * 6 bits of the shift distance. 4982 */ 4983 /* ushr-long vAA, vBB, vCC */ 4984 FETCH(r0, 1) @ r0<- CCBB 4985 mov r9, rINST, lsr #8 @ r9<- AA 4986 and r3, r0, #255 @ r3<- BB 4987 mov r0, r0, lsr #8 @ r0<- CC 4988 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 4989 GET_VREG(r2, r0) @ r2<- vCC 4990 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 4991 and r2, r2, #63 @ r0<- r0 & 0x3f 4992 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4993 4994 mov r0, r0, lsr r2 @ r0<- r2 >> r2 4995 rsb r3, r2, #32 @ r3<- 32 - r2 4996 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 4997 subs ip, r2, #32 @ ip<- r2 - 32 4998 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 4999 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5000 b .LOP_USHR_LONG_finish 5001 5002/* ------------------------------ */ 5003 .balign 64 5004.L_OP_ADD_FLOAT: /* 0xa6 */ 5005/* File: arm-vfp/OP_ADD_FLOAT.S */ 5006/* File: arm-vfp/fbinop.S */ 5007 /* 5008 * Generic 32-bit floating-point operation. Provide an "instr" line that 5009 * specifies an instruction that performs "s2 = s0 op s1". Because we 5010 * use the "softfp" ABI, this must be an instruction, not a function call. 5011 * 5012 * For: add-float, sub-float, mul-float, div-float 5013 */ 5014 /* floatop vAA, vBB, vCC */ 5015 FETCH(r0, 1) @ r0<- CCBB 5016 mov r9, rINST, lsr #8 @ r9<- AA 5017 mov r3, r0, lsr #8 @ r3<- CC 5018 and r2, r0, #255 @ r2<- BB 5019 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5020 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5021 flds s1, [r3] @ s1<- vCC 5022 flds s0, [r2] @ s0<- vBB 5023 5024 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5025 fadds s2, s0, s1 @ s2<- op 5026 GET_INST_OPCODE(ip) @ extract opcode from rINST 5027 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5028 fsts s2, [r9] @ vAA<- s2 5029 GOTO_OPCODE(ip) @ jump to next instruction 5030 5031 5032/* ------------------------------ */ 5033 .balign 64 5034.L_OP_SUB_FLOAT: /* 0xa7 */ 5035/* File: arm-vfp/OP_SUB_FLOAT.S */ 5036/* File: arm-vfp/fbinop.S */ 5037 /* 5038 * Generic 32-bit floating-point operation. Provide an "instr" line that 5039 * specifies an instruction that performs "s2 = s0 op s1". Because we 5040 * use the "softfp" ABI, this must be an instruction, not a function call. 5041 * 5042 * For: add-float, sub-float, mul-float, div-float 5043 */ 5044 /* floatop vAA, vBB, vCC */ 5045 FETCH(r0, 1) @ r0<- CCBB 5046 mov r9, rINST, lsr #8 @ r9<- AA 5047 mov r3, r0, lsr #8 @ r3<- CC 5048 and r2, r0, #255 @ r2<- BB 5049 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5050 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5051 flds s1, [r3] @ s1<- vCC 5052 flds s0, [r2] @ s0<- vBB 5053 5054 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5055 fsubs s2, s0, s1 @ s2<- op 5056 GET_INST_OPCODE(ip) @ extract opcode from rINST 5057 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5058 fsts s2, [r9] @ vAA<- s2 5059 GOTO_OPCODE(ip) @ jump to next instruction 5060 5061 5062/* ------------------------------ */ 5063 .balign 64 5064.L_OP_MUL_FLOAT: /* 0xa8 */ 5065/* File: arm-vfp/OP_MUL_FLOAT.S */ 5066/* File: arm-vfp/fbinop.S */ 5067 /* 5068 * Generic 32-bit floating-point operation. Provide an "instr" line that 5069 * specifies an instruction that performs "s2 = s0 op s1". Because we 5070 * use the "softfp" ABI, this must be an instruction, not a function call. 5071 * 5072 * For: add-float, sub-float, mul-float, div-float 5073 */ 5074 /* floatop vAA, vBB, vCC */ 5075 FETCH(r0, 1) @ r0<- CCBB 5076 mov r9, rINST, lsr #8 @ r9<- AA 5077 mov r3, r0, lsr #8 @ r3<- CC 5078 and r2, r0, #255 @ r2<- BB 5079 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5080 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5081 flds s1, [r3] @ s1<- vCC 5082 flds s0, [r2] @ s0<- vBB 5083 5084 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5085 fmuls s2, s0, s1 @ s2<- op 5086 GET_INST_OPCODE(ip) @ extract opcode from rINST 5087 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5088 fsts s2, [r9] @ vAA<- s2 5089 GOTO_OPCODE(ip) @ jump to next instruction 5090 5091 5092/* ------------------------------ */ 5093 .balign 64 5094.L_OP_DIV_FLOAT: /* 0xa9 */ 5095/* File: arm-vfp/OP_DIV_FLOAT.S */ 5096/* File: arm-vfp/fbinop.S */ 5097 /* 5098 * Generic 32-bit floating-point operation. Provide an "instr" line that 5099 * specifies an instruction that performs "s2 = s0 op s1". Because we 5100 * use the "softfp" ABI, this must be an instruction, not a function call. 5101 * 5102 * For: add-float, sub-float, mul-float, div-float 5103 */ 5104 /* floatop vAA, vBB, vCC */ 5105 FETCH(r0, 1) @ r0<- CCBB 5106 mov r9, rINST, lsr #8 @ r9<- AA 5107 mov r3, r0, lsr #8 @ r3<- CC 5108 and r2, r0, #255 @ r2<- BB 5109 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5110 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5111 flds s1, [r3] @ s1<- vCC 5112 flds s0, [r2] @ s0<- vBB 5113 5114 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5115 fdivs s2, s0, s1 @ s2<- op 5116 GET_INST_OPCODE(ip) @ extract opcode from rINST 5117 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5118 fsts s2, [r9] @ vAA<- s2 5119 GOTO_OPCODE(ip) @ jump to next instruction 5120 5121 5122/* ------------------------------ */ 5123 .balign 64 5124.L_OP_REM_FLOAT: /* 0xaa */ 5125/* File: armv5te/OP_REM_FLOAT.S */ 5126/* EABI doesn't define a float remainder function, but libm does */ 5127/* File: armv5te/binop.S */ 5128 /* 5129 * Generic 32-bit binary operation. Provide an "instr" line that 5130 * specifies an instruction that performs "result = r0 op r1". 5131 * This could be an ARM instruction or a function call. (If the result 5132 * comes back in a register other than r0, you can override "result".) 5133 * 5134 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5135 * vCC (r1). Useful for integer division and modulus. Note that we 5136 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5137 * handles it correctly. 5138 * 5139 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5140 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5141 * mul-float, div-float, rem-float 5142 */ 5143 /* binop vAA, vBB, vCC */ 5144 FETCH(r0, 1) @ r0<- CCBB 5145 mov r9, rINST, lsr #8 @ r9<- AA 5146 mov r3, r0, lsr #8 @ r3<- CC 5147 and r2, r0, #255 @ r2<- BB 5148 GET_VREG(r1, r3) @ r1<- vCC 5149 GET_VREG(r0, r2) @ r0<- vBB 5150 .if 0 5151 cmp r1, #0 @ is second operand zero? 5152 beq common_errDivideByZero 5153 .endif 5154 5155 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5156 @ optional op; may set condition codes 5157 bl fmodf @ r0<- op, r0-r3 changed 5158 GET_INST_OPCODE(ip) @ extract opcode from rINST 5159 SET_VREG(r0, r9) @ vAA<- r0 5160 GOTO_OPCODE(ip) @ jump to next instruction 5161 /* 11-14 instructions */ 5162 5163 5164 5165/* ------------------------------ */ 5166 .balign 64 5167.L_OP_ADD_DOUBLE: /* 0xab */ 5168/* File: arm-vfp/OP_ADD_DOUBLE.S */ 5169/* File: arm-vfp/fbinopWide.S */ 5170 /* 5171 * Generic 64-bit double-precision floating point binary operation. 5172 * Provide an "instr" line that specifies an instruction that performs 5173 * "d2 = d0 op d1". 5174 * 5175 * for: add-double, sub-double, mul-double, div-double 5176 */ 5177 /* doubleop vAA, vBB, vCC */ 5178 FETCH(r0, 1) @ r0<- CCBB 5179 mov r9, rINST, lsr #8 @ r9<- AA 5180 mov r3, r0, lsr #8 @ r3<- CC 5181 and r2, r0, #255 @ r2<- BB 5182 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5183 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5184 fldd d1, [r3] @ d1<- vCC 5185 fldd d0, [r2] @ d0<- vBB 5186 5187 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5188 faddd d2, d0, d1 @ s2<- op 5189 GET_INST_OPCODE(ip) @ extract opcode from rINST 5190 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5191 fstd d2, [r9] @ vAA<- d2 5192 GOTO_OPCODE(ip) @ jump to next instruction 5193 5194 5195/* ------------------------------ */ 5196 .balign 64 5197.L_OP_SUB_DOUBLE: /* 0xac */ 5198/* File: arm-vfp/OP_SUB_DOUBLE.S */ 5199/* File: arm-vfp/fbinopWide.S */ 5200 /* 5201 * Generic 64-bit double-precision floating point binary operation. 5202 * Provide an "instr" line that specifies an instruction that performs 5203 * "d2 = d0 op d1". 5204 * 5205 * for: add-double, sub-double, mul-double, div-double 5206 */ 5207 /* doubleop vAA, vBB, vCC */ 5208 FETCH(r0, 1) @ r0<- CCBB 5209 mov r9, rINST, lsr #8 @ r9<- AA 5210 mov r3, r0, lsr #8 @ r3<- CC 5211 and r2, r0, #255 @ r2<- BB 5212 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5213 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5214 fldd d1, [r3] @ d1<- vCC 5215 fldd d0, [r2] @ d0<- vBB 5216 5217 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5218 fsubd d2, d0, d1 @ s2<- op 5219 GET_INST_OPCODE(ip) @ extract opcode from rINST 5220 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5221 fstd d2, [r9] @ vAA<- d2 5222 GOTO_OPCODE(ip) @ jump to next instruction 5223 5224 5225/* ------------------------------ */ 5226 .balign 64 5227.L_OP_MUL_DOUBLE: /* 0xad */ 5228/* File: arm-vfp/OP_MUL_DOUBLE.S */ 5229/* File: arm-vfp/fbinopWide.S */ 5230 /* 5231 * Generic 64-bit double-precision floating point binary operation. 5232 * Provide an "instr" line that specifies an instruction that performs 5233 * "d2 = d0 op d1". 5234 * 5235 * for: add-double, sub-double, mul-double, div-double 5236 */ 5237 /* doubleop vAA, vBB, vCC */ 5238 FETCH(r0, 1) @ r0<- CCBB 5239 mov r9, rINST, lsr #8 @ r9<- AA 5240 mov r3, r0, lsr #8 @ r3<- CC 5241 and r2, r0, #255 @ r2<- BB 5242 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5243 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5244 fldd d1, [r3] @ d1<- vCC 5245 fldd d0, [r2] @ d0<- vBB 5246 5247 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5248 fmuld d2, d0, d1 @ s2<- op 5249 GET_INST_OPCODE(ip) @ extract opcode from rINST 5250 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5251 fstd d2, [r9] @ vAA<- d2 5252 GOTO_OPCODE(ip) @ jump to next instruction 5253 5254 5255/* ------------------------------ */ 5256 .balign 64 5257.L_OP_DIV_DOUBLE: /* 0xae */ 5258/* File: arm-vfp/OP_DIV_DOUBLE.S */ 5259/* File: arm-vfp/fbinopWide.S */ 5260 /* 5261 * Generic 64-bit double-precision floating point binary operation. 5262 * Provide an "instr" line that specifies an instruction that performs 5263 * "d2 = d0 op d1". 5264 * 5265 * for: add-double, sub-double, mul-double, div-double 5266 */ 5267 /* doubleop vAA, vBB, vCC */ 5268 FETCH(r0, 1) @ r0<- CCBB 5269 mov r9, rINST, lsr #8 @ r9<- AA 5270 mov r3, r0, lsr #8 @ r3<- CC 5271 and r2, r0, #255 @ r2<- BB 5272 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5273 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5274 fldd d1, [r3] @ d1<- vCC 5275 fldd d0, [r2] @ d0<- vBB 5276 5277 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5278 fdivd d2, d0, d1 @ s2<- op 5279 GET_INST_OPCODE(ip) @ extract opcode from rINST 5280 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5281 fstd d2, [r9] @ vAA<- d2 5282 GOTO_OPCODE(ip) @ jump to next instruction 5283 5284 5285/* ------------------------------ */ 5286 .balign 64 5287.L_OP_REM_DOUBLE: /* 0xaf */ 5288/* File: armv5te/OP_REM_DOUBLE.S */ 5289/* EABI doesn't define a double remainder function, but libm does */ 5290/* File: armv5te/binopWide.S */ 5291 /* 5292 * Generic 64-bit binary operation. Provide an "instr" line that 5293 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5294 * This could be an ARM instruction or a function call. (If the result 5295 * comes back in a register other than r0, you can override "result".) 5296 * 5297 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5298 * vCC (r1). Useful for integer division and modulus. 5299 * 5300 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5301 * xor-long, add-double, sub-double, mul-double, div-double, 5302 * rem-double 5303 * 5304 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5305 */ 5306 /* binop vAA, vBB, vCC */ 5307 FETCH(r0, 1) @ r0<- CCBB 5308 mov r9, rINST, lsr #8 @ r9<- AA 5309 and r2, r0, #255 @ r2<- BB 5310 mov r3, r0, lsr #8 @ r3<- CC 5311 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5312 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5313 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5314 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5315 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5316 .if 0 5317 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5318 beq common_errDivideByZero 5319 .endif 5320 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5321 5322 @ optional op; may set condition codes 5323 bl fmod @ result<- op, r0-r3 changed 5324 GET_INST_OPCODE(ip) @ extract opcode from rINST 5325 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5326 GOTO_OPCODE(ip) @ jump to next instruction 5327 /* 14-17 instructions */ 5328 5329 5330 5331/* ------------------------------ */ 5332 .balign 64 5333.L_OP_ADD_INT_2ADDR: /* 0xb0 */ 5334/* File: armv5te/OP_ADD_INT_2ADDR.S */ 5335/* File: armv5te/binop2addr.S */ 5336 /* 5337 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5338 * that specifies an instruction that performs "result = r0 op r1". 5339 * This could be an ARM instruction or a function call. (If the result 5340 * comes back in a register other than r0, you can override "result".) 5341 * 5342 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5343 * vCC (r1). Useful for integer division and modulus. 5344 * 5345 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5346 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5347 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5348 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5349 */ 5350 /* binop/2addr vA, vB */ 5351 mov r9, rINST, lsr #8 @ r9<- A+ 5352 mov r3, rINST, lsr #12 @ r3<- B 5353 and r9, r9, #15 5354 GET_VREG(r1, r3) @ r1<- vB 5355 GET_VREG(r0, r9) @ r0<- vA 5356 .if 0 5357 cmp r1, #0 @ is second operand zero? 5358 beq common_errDivideByZero 5359 .endif 5360 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5361 5362 @ optional op; may set condition codes 5363 add r0, r0, r1 @ r0<- op, r0-r3 changed 5364 GET_INST_OPCODE(ip) @ extract opcode from rINST 5365 SET_VREG(r0, r9) @ vAA<- r0 5366 GOTO_OPCODE(ip) @ jump to next instruction 5367 /* 10-13 instructions */ 5368 5369 5370 5371/* ------------------------------ */ 5372 .balign 64 5373.L_OP_SUB_INT_2ADDR: /* 0xb1 */ 5374/* File: armv5te/OP_SUB_INT_2ADDR.S */ 5375/* File: armv5te/binop2addr.S */ 5376 /* 5377 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5378 * that specifies an instruction that performs "result = r0 op r1". 5379 * This could be an ARM instruction or a function call. (If the result 5380 * comes back in a register other than r0, you can override "result".) 5381 * 5382 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5383 * vCC (r1). Useful for integer division and modulus. 5384 * 5385 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5386 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5387 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5388 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5389 */ 5390 /* binop/2addr vA, vB */ 5391 mov r9, rINST, lsr #8 @ r9<- A+ 5392 mov r3, rINST, lsr #12 @ r3<- B 5393 and r9, r9, #15 5394 GET_VREG(r1, r3) @ r1<- vB 5395 GET_VREG(r0, r9) @ r0<- vA 5396 .if 0 5397 cmp r1, #0 @ is second operand zero? 5398 beq common_errDivideByZero 5399 .endif 5400 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5401 5402 @ optional op; may set condition codes 5403 sub r0, r0, r1 @ r0<- op, r0-r3 changed 5404 GET_INST_OPCODE(ip) @ extract opcode from rINST 5405 SET_VREG(r0, r9) @ vAA<- r0 5406 GOTO_OPCODE(ip) @ jump to next instruction 5407 /* 10-13 instructions */ 5408 5409 5410 5411/* ------------------------------ */ 5412 .balign 64 5413.L_OP_MUL_INT_2ADDR: /* 0xb2 */ 5414/* File: armv5te/OP_MUL_INT_2ADDR.S */ 5415/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 5416/* File: armv5te/binop2addr.S */ 5417 /* 5418 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5419 * that specifies an instruction that performs "result = r0 op r1". 5420 * This could be an ARM instruction or a function call. (If the result 5421 * comes back in a register other than r0, you can override "result".) 5422 * 5423 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5424 * vCC (r1). Useful for integer division and modulus. 5425 * 5426 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5427 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5428 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5429 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5430 */ 5431 /* binop/2addr vA, vB */ 5432 mov r9, rINST, lsr #8 @ r9<- A+ 5433 mov r3, rINST, lsr #12 @ r3<- B 5434 and r9, r9, #15 5435 GET_VREG(r1, r3) @ r1<- vB 5436 GET_VREG(r0, r9) @ r0<- vA 5437 .if 0 5438 cmp r1, #0 @ is second operand zero? 5439 beq common_errDivideByZero 5440 .endif 5441 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5442 5443 @ optional op; may set condition codes 5444 mul r0, r1, r0 @ r0<- op, r0-r3 changed 5445 GET_INST_OPCODE(ip) @ extract opcode from rINST 5446 SET_VREG(r0, r9) @ vAA<- r0 5447 GOTO_OPCODE(ip) @ jump to next instruction 5448 /* 10-13 instructions */ 5449 5450 5451 5452/* ------------------------------ */ 5453 .balign 64 5454.L_OP_DIV_INT_2ADDR: /* 0xb3 */ 5455/* File: armv5te/OP_DIV_INT_2ADDR.S */ 5456/* File: armv5te/binop2addr.S */ 5457 /* 5458 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5459 * that specifies an instruction that performs "result = r0 op r1". 5460 * This could be an ARM instruction or a function call. (If the result 5461 * comes back in a register other than r0, you can override "result".) 5462 * 5463 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5464 * vCC (r1). Useful for integer division and modulus. 5465 * 5466 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5467 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5468 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5469 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5470 */ 5471 /* binop/2addr vA, vB */ 5472 mov r9, rINST, lsr #8 @ r9<- A+ 5473 mov r3, rINST, lsr #12 @ r3<- B 5474 and r9, r9, #15 5475 GET_VREG(r1, r3) @ r1<- vB 5476 GET_VREG(r0, r9) @ r0<- vA 5477 .if 1 5478 cmp r1, #0 @ is second operand zero? 5479 beq common_errDivideByZero 5480 .endif 5481 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5482 5483 @ optional op; may set condition codes 5484 bl __aeabi_idiv @ r0<- op, r0-r3 changed 5485 GET_INST_OPCODE(ip) @ extract opcode from rINST 5486 SET_VREG(r0, r9) @ vAA<- r0 5487 GOTO_OPCODE(ip) @ jump to next instruction 5488 /* 10-13 instructions */ 5489 5490 5491 5492/* ------------------------------ */ 5493 .balign 64 5494.L_OP_REM_INT_2ADDR: /* 0xb4 */ 5495/* File: armv5te/OP_REM_INT_2ADDR.S */ 5496/* idivmod returns quotient in r0 and remainder in r1 */ 5497/* File: armv5te/binop2addr.S */ 5498 /* 5499 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5500 * that specifies an instruction that performs "result = r0 op r1". 5501 * This could be an ARM instruction or a function call. (If the result 5502 * comes back in a register other than r0, you can override "result".) 5503 * 5504 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5505 * vCC (r1). Useful for integer division and modulus. 5506 * 5507 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5508 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5509 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5510 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5511 */ 5512 /* binop/2addr vA, vB */ 5513 mov r9, rINST, lsr #8 @ r9<- A+ 5514 mov r3, rINST, lsr #12 @ r3<- B 5515 and r9, r9, #15 5516 GET_VREG(r1, r3) @ r1<- vB 5517 GET_VREG(r0, r9) @ r0<- vA 5518 .if 1 5519 cmp r1, #0 @ is second operand zero? 5520 beq common_errDivideByZero 5521 .endif 5522 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5523 5524 @ optional op; may set condition codes 5525 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 5526 GET_INST_OPCODE(ip) @ extract opcode from rINST 5527 SET_VREG(r1, r9) @ vAA<- r1 5528 GOTO_OPCODE(ip) @ jump to next instruction 5529 /* 10-13 instructions */ 5530 5531 5532 5533/* ------------------------------ */ 5534 .balign 64 5535.L_OP_AND_INT_2ADDR: /* 0xb5 */ 5536/* File: armv5te/OP_AND_INT_2ADDR.S */ 5537/* File: armv5te/binop2addr.S */ 5538 /* 5539 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5540 * that specifies an instruction that performs "result = r0 op r1". 5541 * This could be an ARM instruction or a function call. (If the result 5542 * comes back in a register other than r0, you can override "result".) 5543 * 5544 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5545 * vCC (r1). Useful for integer division and modulus. 5546 * 5547 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5548 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5549 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5550 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5551 */ 5552 /* binop/2addr vA, vB */ 5553 mov r9, rINST, lsr #8 @ r9<- A+ 5554 mov r3, rINST, lsr #12 @ r3<- B 5555 and r9, r9, #15 5556 GET_VREG(r1, r3) @ r1<- vB 5557 GET_VREG(r0, r9) @ r0<- vA 5558 .if 0 5559 cmp r1, #0 @ is second operand zero? 5560 beq common_errDivideByZero 5561 .endif 5562 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5563 5564 @ optional op; may set condition codes 5565 and r0, r0, r1 @ r0<- op, r0-r3 changed 5566 GET_INST_OPCODE(ip) @ extract opcode from rINST 5567 SET_VREG(r0, r9) @ vAA<- r0 5568 GOTO_OPCODE(ip) @ jump to next instruction 5569 /* 10-13 instructions */ 5570 5571 5572 5573/* ------------------------------ */ 5574 .balign 64 5575.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5576/* File: armv5te/OP_OR_INT_2ADDR.S */ 5577/* File: armv5te/binop2addr.S */ 5578 /* 5579 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5580 * that specifies an instruction that performs "result = r0 op r1". 5581 * This could be an ARM instruction or a function call. (If the result 5582 * comes back in a register other than r0, you can override "result".) 5583 * 5584 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5585 * vCC (r1). Useful for integer division and modulus. 5586 * 5587 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5588 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5589 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5590 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5591 */ 5592 /* binop/2addr vA, vB */ 5593 mov r9, rINST, lsr #8 @ r9<- A+ 5594 mov r3, rINST, lsr #12 @ r3<- B 5595 and r9, r9, #15 5596 GET_VREG(r1, r3) @ r1<- vB 5597 GET_VREG(r0, r9) @ r0<- vA 5598 .if 0 5599 cmp r1, #0 @ is second operand zero? 5600 beq common_errDivideByZero 5601 .endif 5602 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5603 5604 @ optional op; may set condition codes 5605 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5606 GET_INST_OPCODE(ip) @ extract opcode from rINST 5607 SET_VREG(r0, r9) @ vAA<- r0 5608 GOTO_OPCODE(ip) @ jump to next instruction 5609 /* 10-13 instructions */ 5610 5611 5612 5613/* ------------------------------ */ 5614 .balign 64 5615.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5616/* File: armv5te/OP_XOR_INT_2ADDR.S */ 5617/* File: armv5te/binop2addr.S */ 5618 /* 5619 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5620 * that specifies an instruction that performs "result = r0 op r1". 5621 * This could be an ARM instruction or a function call. (If the result 5622 * comes back in a register other than r0, you can override "result".) 5623 * 5624 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5625 * vCC (r1). Useful for integer division and modulus. 5626 * 5627 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5628 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5629 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5630 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5631 */ 5632 /* binop/2addr vA, vB */ 5633 mov r9, rINST, lsr #8 @ r9<- A+ 5634 mov r3, rINST, lsr #12 @ r3<- B 5635 and r9, r9, #15 5636 GET_VREG(r1, r3) @ r1<- vB 5637 GET_VREG(r0, r9) @ r0<- vA 5638 .if 0 5639 cmp r1, #0 @ is second operand zero? 5640 beq common_errDivideByZero 5641 .endif 5642 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5643 5644 @ optional op; may set condition codes 5645 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5646 GET_INST_OPCODE(ip) @ extract opcode from rINST 5647 SET_VREG(r0, r9) @ vAA<- r0 5648 GOTO_OPCODE(ip) @ jump to next instruction 5649 /* 10-13 instructions */ 5650 5651 5652 5653/* ------------------------------ */ 5654 .balign 64 5655.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5656/* File: armv5te/OP_SHL_INT_2ADDR.S */ 5657/* File: armv5te/binop2addr.S */ 5658 /* 5659 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5660 * that specifies an instruction that performs "result = r0 op r1". 5661 * This could be an ARM instruction or a function call. (If the result 5662 * comes back in a register other than r0, you can override "result".) 5663 * 5664 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5665 * vCC (r1). Useful for integer division and modulus. 5666 * 5667 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5668 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5669 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5670 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5671 */ 5672 /* binop/2addr vA, vB */ 5673 mov r9, rINST, lsr #8 @ r9<- A+ 5674 mov r3, rINST, lsr #12 @ r3<- B 5675 and r9, r9, #15 5676 GET_VREG(r1, r3) @ r1<- vB 5677 GET_VREG(r0, r9) @ r0<- vA 5678 .if 0 5679 cmp r1, #0 @ is second operand zero? 5680 beq common_errDivideByZero 5681 .endif 5682 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5683 5684 and r1, r1, #31 @ optional op; may set condition codes 5685 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5686 GET_INST_OPCODE(ip) @ extract opcode from rINST 5687 SET_VREG(r0, r9) @ vAA<- r0 5688 GOTO_OPCODE(ip) @ jump to next instruction 5689 /* 10-13 instructions */ 5690 5691 5692 5693/* ------------------------------ */ 5694 .balign 64 5695.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5696/* File: armv5te/OP_SHR_INT_2ADDR.S */ 5697/* File: armv5te/binop2addr.S */ 5698 /* 5699 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5700 * that specifies an instruction that performs "result = r0 op r1". 5701 * This could be an ARM instruction or a function call. (If the result 5702 * comes back in a register other than r0, you can override "result".) 5703 * 5704 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5705 * vCC (r1). Useful for integer division and modulus. 5706 * 5707 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5708 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5709 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5710 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5711 */ 5712 /* binop/2addr vA, vB */ 5713 mov r9, rINST, lsr #8 @ r9<- A+ 5714 mov r3, rINST, lsr #12 @ r3<- B 5715 and r9, r9, #15 5716 GET_VREG(r1, r3) @ r1<- vB 5717 GET_VREG(r0, r9) @ r0<- vA 5718 .if 0 5719 cmp r1, #0 @ is second operand zero? 5720 beq common_errDivideByZero 5721 .endif 5722 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5723 5724 and r1, r1, #31 @ optional op; may set condition codes 5725 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5726 GET_INST_OPCODE(ip) @ extract opcode from rINST 5727 SET_VREG(r0, r9) @ vAA<- r0 5728 GOTO_OPCODE(ip) @ jump to next instruction 5729 /* 10-13 instructions */ 5730 5731 5732 5733/* ------------------------------ */ 5734 .balign 64 5735.L_OP_USHR_INT_2ADDR: /* 0xba */ 5736/* File: armv5te/OP_USHR_INT_2ADDR.S */ 5737/* File: armv5te/binop2addr.S */ 5738 /* 5739 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5740 * that specifies an instruction that performs "result = r0 op r1". 5741 * This could be an ARM instruction or a function call. (If the result 5742 * comes back in a register other than r0, you can override "result".) 5743 * 5744 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5745 * vCC (r1). Useful for integer division and modulus. 5746 * 5747 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5748 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5749 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5750 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5751 */ 5752 /* binop/2addr vA, vB */ 5753 mov r9, rINST, lsr #8 @ r9<- A+ 5754 mov r3, rINST, lsr #12 @ r3<- B 5755 and r9, r9, #15 5756 GET_VREG(r1, r3) @ r1<- vB 5757 GET_VREG(r0, r9) @ r0<- vA 5758 .if 0 5759 cmp r1, #0 @ is second operand zero? 5760 beq common_errDivideByZero 5761 .endif 5762 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5763 5764 and r1, r1, #31 @ optional op; may set condition codes 5765 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5766 GET_INST_OPCODE(ip) @ extract opcode from rINST 5767 SET_VREG(r0, r9) @ vAA<- r0 5768 GOTO_OPCODE(ip) @ jump to next instruction 5769 /* 10-13 instructions */ 5770 5771 5772 5773/* ------------------------------ */ 5774 .balign 64 5775.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 5776/* File: armv5te/OP_ADD_LONG_2ADDR.S */ 5777/* File: armv5te/binopWide2addr.S */ 5778 /* 5779 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5780 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5781 * This could be an ARM instruction or a function call. (If the result 5782 * comes back in a register other than r0, you can override "result".) 5783 * 5784 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5785 * vCC (r1). Useful for integer division and modulus. 5786 * 5787 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5788 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5789 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5790 * rem-double/2addr 5791 */ 5792 /* binop/2addr vA, vB */ 5793 mov r9, rINST, lsr #8 @ r9<- A+ 5794 mov r1, rINST, lsr #12 @ r1<- B 5795 and r9, r9, #15 5796 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5797 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5798 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5799 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5800 .if 0 5801 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5802 beq common_errDivideByZero 5803 .endif 5804 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5805 5806 adds r0, r0, r2 @ optional op; may set condition codes 5807 adc r1, r1, r3 @ result<- op, r0-r3 changed 5808 GET_INST_OPCODE(ip) @ extract opcode from rINST 5809 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5810 GOTO_OPCODE(ip) @ jump to next instruction 5811 /* 12-15 instructions */ 5812 5813 5814 5815/* ------------------------------ */ 5816 .balign 64 5817.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 5818/* File: armv5te/OP_SUB_LONG_2ADDR.S */ 5819/* File: armv5te/binopWide2addr.S */ 5820 /* 5821 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5822 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5823 * This could be an ARM instruction or a function call. (If the result 5824 * comes back in a register other than r0, you can override "result".) 5825 * 5826 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5827 * vCC (r1). Useful for integer division and modulus. 5828 * 5829 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5830 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5831 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5832 * rem-double/2addr 5833 */ 5834 /* binop/2addr vA, vB */ 5835 mov r9, rINST, lsr #8 @ r9<- A+ 5836 mov r1, rINST, lsr #12 @ r1<- B 5837 and r9, r9, #15 5838 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5839 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5840 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5841 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5842 .if 0 5843 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5844 beq common_errDivideByZero 5845 .endif 5846 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5847 5848 subs r0, r0, r2 @ optional op; may set condition codes 5849 sbc r1, r1, r3 @ result<- op, r0-r3 changed 5850 GET_INST_OPCODE(ip) @ extract opcode from rINST 5851 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5852 GOTO_OPCODE(ip) @ jump to next instruction 5853 /* 12-15 instructions */ 5854 5855 5856 5857/* ------------------------------ */ 5858 .balign 64 5859.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 5860/* File: armv5te/OP_MUL_LONG_2ADDR.S */ 5861 /* 5862 * Signed 64-bit integer multiply, "/2addr" version. 5863 * 5864 * See OP_MUL_LONG for an explanation. 5865 * 5866 * We get a little tight on registers, so to avoid looking up &fp[A] 5867 * again we stuff it into rINST. 5868 */ 5869 /* mul-long/2addr vA, vB */ 5870 mov r9, rINST, lsr #8 @ r9<- A+ 5871 mov r1, rINST, lsr #12 @ r1<- B 5872 and r9, r9, #15 5873 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5874 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 5875 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5876 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 5877 mul ip, r2, r1 @ ip<- ZxW 5878 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 5879 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 5880 mov r0, rINST @ r0<- &fp[A] (free up rINST) 5881 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5882 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 5883 GET_INST_OPCODE(ip) @ extract opcode from rINST 5884 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 5885 GOTO_OPCODE(ip) @ jump to next instruction 5886 5887 5888/* ------------------------------ */ 5889 .balign 64 5890.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 5891/* File: armv5te/OP_DIV_LONG_2ADDR.S */ 5892/* File: armv5te/binopWide2addr.S */ 5893 /* 5894 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5895 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5896 * This could be an ARM instruction or a function call. (If the result 5897 * comes back in a register other than r0, you can override "result".) 5898 * 5899 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5900 * vCC (r1). Useful for integer division and modulus. 5901 * 5902 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5903 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5904 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5905 * rem-double/2addr 5906 */ 5907 /* binop/2addr vA, vB */ 5908 mov r9, rINST, lsr #8 @ r9<- A+ 5909 mov r1, rINST, lsr #12 @ r1<- B 5910 and r9, r9, #15 5911 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5912 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5913 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5914 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5915 .if 1 5916 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5917 beq common_errDivideByZero 5918 .endif 5919 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5920 5921 @ optional op; may set condition codes 5922 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5923 GET_INST_OPCODE(ip) @ extract opcode from rINST 5924 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5925 GOTO_OPCODE(ip) @ jump to next instruction 5926 /* 12-15 instructions */ 5927 5928 5929 5930/* ------------------------------ */ 5931 .balign 64 5932.L_OP_REM_LONG_2ADDR: /* 0xbf */ 5933/* File: armv5te/OP_REM_LONG_2ADDR.S */ 5934/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 5935/* File: armv5te/binopWide2addr.S */ 5936 /* 5937 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5938 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5939 * This could be an ARM instruction or a function call. (If the result 5940 * comes back in a register other than r0, you can override "result".) 5941 * 5942 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5943 * vCC (r1). Useful for integer division and modulus. 5944 * 5945 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5946 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5947 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5948 * rem-double/2addr 5949 */ 5950 /* binop/2addr vA, vB */ 5951 mov r9, rINST, lsr #8 @ r9<- A+ 5952 mov r1, rINST, lsr #12 @ r1<- B 5953 and r9, r9, #15 5954 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5955 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5956 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5957 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5958 .if 1 5959 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5960 beq common_errDivideByZero 5961 .endif 5962 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5963 5964 @ optional op; may set condition codes 5965 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5966 GET_INST_OPCODE(ip) @ extract opcode from rINST 5967 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 5968 GOTO_OPCODE(ip) @ jump to next instruction 5969 /* 12-15 instructions */ 5970 5971 5972 5973/* ------------------------------ */ 5974 .balign 64 5975.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 5976/* File: armv5te/OP_AND_LONG_2ADDR.S */ 5977/* File: armv5te/binopWide2addr.S */ 5978 /* 5979 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5980 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5981 * This could be an ARM instruction or a function call. (If the result 5982 * comes back in a register other than r0, you can override "result".) 5983 * 5984 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5985 * vCC (r1). Useful for integer division and modulus. 5986 * 5987 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5988 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5989 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5990 * rem-double/2addr 5991 */ 5992 /* binop/2addr vA, vB */ 5993 mov r9, rINST, lsr #8 @ r9<- A+ 5994 mov r1, rINST, lsr #12 @ r1<- B 5995 and r9, r9, #15 5996 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5997 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5998 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5999 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6000 .if 0 6001 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6002 beq common_errDivideByZero 6003 .endif 6004 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6005 6006 and r0, r0, r2 @ optional op; may set condition codes 6007 and r1, r1, r3 @ result<- op, r0-r3 changed 6008 GET_INST_OPCODE(ip) @ extract opcode from rINST 6009 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6010 GOTO_OPCODE(ip) @ jump to next instruction 6011 /* 12-15 instructions */ 6012 6013 6014 6015/* ------------------------------ */ 6016 .balign 64 6017.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 6018/* File: armv5te/OP_OR_LONG_2ADDR.S */ 6019/* File: armv5te/binopWide2addr.S */ 6020 /* 6021 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6022 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6023 * This could be an ARM instruction or a function call. (If the result 6024 * comes back in a register other than r0, you can override "result".) 6025 * 6026 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6027 * vCC (r1). Useful for integer division and modulus. 6028 * 6029 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6030 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6031 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6032 * rem-double/2addr 6033 */ 6034 /* binop/2addr vA, vB */ 6035 mov r9, rINST, lsr #8 @ r9<- A+ 6036 mov r1, rINST, lsr #12 @ r1<- B 6037 and r9, r9, #15 6038 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6039 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6040 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6041 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6042 .if 0 6043 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6044 beq common_errDivideByZero 6045 .endif 6046 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6047 6048 orr r0, r0, r2 @ optional op; may set condition codes 6049 orr r1, r1, r3 @ result<- op, r0-r3 changed 6050 GET_INST_OPCODE(ip) @ extract opcode from rINST 6051 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6052 GOTO_OPCODE(ip) @ jump to next instruction 6053 /* 12-15 instructions */ 6054 6055 6056 6057/* ------------------------------ */ 6058 .balign 64 6059.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 6060/* File: armv5te/OP_XOR_LONG_2ADDR.S */ 6061/* File: armv5te/binopWide2addr.S */ 6062 /* 6063 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6064 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6065 * This could be an ARM instruction or a function call. (If the result 6066 * comes back in a register other than r0, you can override "result".) 6067 * 6068 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6069 * vCC (r1). Useful for integer division and modulus. 6070 * 6071 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6072 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6073 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6074 * rem-double/2addr 6075 */ 6076 /* binop/2addr vA, vB */ 6077 mov r9, rINST, lsr #8 @ r9<- A+ 6078 mov r1, rINST, lsr #12 @ r1<- B 6079 and r9, r9, #15 6080 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6081 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6082 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6083 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6084 .if 0 6085 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6086 beq common_errDivideByZero 6087 .endif 6088 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6089 6090 eor r0, r0, r2 @ optional op; may set condition codes 6091 eor r1, r1, r3 @ result<- op, r0-r3 changed 6092 GET_INST_OPCODE(ip) @ extract opcode from rINST 6093 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6094 GOTO_OPCODE(ip) @ jump to next instruction 6095 /* 12-15 instructions */ 6096 6097 6098 6099/* ------------------------------ */ 6100 .balign 64 6101.L_OP_SHL_LONG_2ADDR: /* 0xc3 */ 6102/* File: armv5te/OP_SHL_LONG_2ADDR.S */ 6103 /* 6104 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6105 * 32-bit shift distance. 6106 */ 6107 /* shl-long/2addr vA, vB */ 6108 mov r9, rINST, lsr #8 @ r9<- A+ 6109 mov r3, rINST, lsr #12 @ r3<- B 6110 and r9, r9, #15 6111 GET_VREG(r2, r3) @ r2<- vB 6112 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6113 and r2, r2, #63 @ r2<- r2 & 0x3f 6114 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6115 6116 mov r1, r1, asl r2 @ r1<- r1 << r2 6117 rsb r3, r2, #32 @ r3<- 32 - r2 6118 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 6119 subs ip, r2, #32 @ ip<- r2 - 32 6120 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6121 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 6122 mov r0, r0, asl r2 @ r0<- r0 << r2 6123 b .LOP_SHL_LONG_2ADDR_finish 6124 6125/* ------------------------------ */ 6126 .balign 64 6127.L_OP_SHR_LONG_2ADDR: /* 0xc4 */ 6128/* File: armv5te/OP_SHR_LONG_2ADDR.S */ 6129 /* 6130 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6131 * 32-bit shift distance. 6132 */ 6133 /* shr-long/2addr vA, vB */ 6134 mov r9, rINST, lsr #8 @ r9<- A+ 6135 mov r3, rINST, lsr #12 @ r3<- B 6136 and r9, r9, #15 6137 GET_VREG(r2, r3) @ r2<- vB 6138 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6139 and r2, r2, #63 @ r2<- r2 & 0x3f 6140 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6141 6142 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6143 rsb r3, r2, #32 @ r3<- 32 - r2 6144 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6145 subs ip, r2, #32 @ ip<- r2 - 32 6146 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6147 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 6148 mov r1, r1, asr r2 @ r1<- r1 >> r2 6149 b .LOP_SHR_LONG_2ADDR_finish 6150 6151/* ------------------------------ */ 6152 .balign 64 6153.L_OP_USHR_LONG_2ADDR: /* 0xc5 */ 6154/* File: armv5te/OP_USHR_LONG_2ADDR.S */ 6155 /* 6156 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6157 * 32-bit shift distance. 6158 */ 6159 /* ushr-long/2addr vA, vB */ 6160 mov r9, rINST, lsr #8 @ r9<- A+ 6161 mov r3, rINST, lsr #12 @ r3<- B 6162 and r9, r9, #15 6163 GET_VREG(r2, r3) @ r2<- vB 6164 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6165 and r2, r2, #63 @ r2<- r2 & 0x3f 6166 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6167 6168 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6169 rsb r3, r2, #32 @ r3<- 32 - r2 6170 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6171 subs ip, r2, #32 @ ip<- r2 - 32 6172 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6173 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 6174 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 6175 b .LOP_USHR_LONG_2ADDR_finish 6176 6177/* ------------------------------ */ 6178 .balign 64 6179.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */ 6180/* File: arm-vfp/OP_ADD_FLOAT_2ADDR.S */ 6181/* File: arm-vfp/fbinop2addr.S */ 6182 /* 6183 * Generic 32-bit floating point "/2addr" binary operation. Provide 6184 * an "instr" line that specifies an instruction that performs 6185 * "s2 = s0 op s1". 6186 * 6187 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6188 */ 6189 /* binop/2addr vA, vB */ 6190 mov r3, rINST, lsr #12 @ r3<- B 6191 mov r9, rINST, lsr #8 @ r9<- A+ 6192 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6193 and r9, r9, #15 @ r9<- A 6194 flds s1, [r3] @ s1<- vB 6195 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6196 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6197 flds s0, [r9] @ s0<- vA 6198 6199 fadds s2, s0, s1 @ s2<- op 6200 GET_INST_OPCODE(ip) @ extract opcode from rINST 6201 fsts s2, [r9] @ vAA<- s2 6202 GOTO_OPCODE(ip) @ jump to next instruction 6203 6204 6205/* ------------------------------ */ 6206 .balign 64 6207.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */ 6208/* File: arm-vfp/OP_SUB_FLOAT_2ADDR.S */ 6209/* File: arm-vfp/fbinop2addr.S */ 6210 /* 6211 * Generic 32-bit floating point "/2addr" binary operation. Provide 6212 * an "instr" line that specifies an instruction that performs 6213 * "s2 = s0 op s1". 6214 * 6215 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6216 */ 6217 /* binop/2addr vA, vB */ 6218 mov r3, rINST, lsr #12 @ r3<- B 6219 mov r9, rINST, lsr #8 @ r9<- A+ 6220 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6221 and r9, r9, #15 @ r9<- A 6222 flds s1, [r3] @ s1<- vB 6223 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6224 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6225 flds s0, [r9] @ s0<- vA 6226 6227 fsubs s2, s0, s1 @ s2<- op 6228 GET_INST_OPCODE(ip) @ extract opcode from rINST 6229 fsts s2, [r9] @ vAA<- s2 6230 GOTO_OPCODE(ip) @ jump to next instruction 6231 6232 6233/* ------------------------------ */ 6234 .balign 64 6235.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */ 6236/* File: arm-vfp/OP_MUL_FLOAT_2ADDR.S */ 6237/* File: arm-vfp/fbinop2addr.S */ 6238 /* 6239 * Generic 32-bit floating point "/2addr" binary operation. Provide 6240 * an "instr" line that specifies an instruction that performs 6241 * "s2 = s0 op s1". 6242 * 6243 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6244 */ 6245 /* binop/2addr vA, vB */ 6246 mov r3, rINST, lsr #12 @ r3<- B 6247 mov r9, rINST, lsr #8 @ r9<- A+ 6248 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6249 and r9, r9, #15 @ r9<- A 6250 flds s1, [r3] @ s1<- vB 6251 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6252 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6253 flds s0, [r9] @ s0<- vA 6254 6255 fmuls s2, s0, s1 @ s2<- op 6256 GET_INST_OPCODE(ip) @ extract opcode from rINST 6257 fsts s2, [r9] @ vAA<- s2 6258 GOTO_OPCODE(ip) @ jump to next instruction 6259 6260 6261/* ------------------------------ */ 6262 .balign 64 6263.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */ 6264/* File: arm-vfp/OP_DIV_FLOAT_2ADDR.S */ 6265/* File: arm-vfp/fbinop2addr.S */ 6266 /* 6267 * Generic 32-bit floating point "/2addr" binary operation. Provide 6268 * an "instr" line that specifies an instruction that performs 6269 * "s2 = s0 op s1". 6270 * 6271 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6272 */ 6273 /* binop/2addr vA, vB */ 6274 mov r3, rINST, lsr #12 @ r3<- B 6275 mov r9, rINST, lsr #8 @ r9<- A+ 6276 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6277 and r9, r9, #15 @ r9<- A 6278 flds s1, [r3] @ s1<- vB 6279 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6280 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6281 flds s0, [r9] @ s0<- vA 6282 6283 fdivs s2, s0, s1 @ s2<- op 6284 GET_INST_OPCODE(ip) @ extract opcode from rINST 6285 fsts s2, [r9] @ vAA<- s2 6286 GOTO_OPCODE(ip) @ jump to next instruction 6287 6288 6289/* ------------------------------ */ 6290 .balign 64 6291.L_OP_REM_FLOAT_2ADDR: /* 0xca */ 6292/* File: armv5te/OP_REM_FLOAT_2ADDR.S */ 6293/* EABI doesn't define a float remainder function, but libm does */ 6294/* File: armv5te/binop2addr.S */ 6295 /* 6296 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6297 * that specifies an instruction that performs "result = r0 op r1". 6298 * This could be an ARM instruction or a function call. (If the result 6299 * comes back in a register other than r0, you can override "result".) 6300 * 6301 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6302 * vCC (r1). Useful for integer division and modulus. 6303 * 6304 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6305 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6306 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6307 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6308 */ 6309 /* binop/2addr vA, vB */ 6310 mov r9, rINST, lsr #8 @ r9<- A+ 6311 mov r3, rINST, lsr #12 @ r3<- B 6312 and r9, r9, #15 6313 GET_VREG(r1, r3) @ r1<- vB 6314 GET_VREG(r0, r9) @ r0<- vA 6315 .if 0 6316 cmp r1, #0 @ is second operand zero? 6317 beq common_errDivideByZero 6318 .endif 6319 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6320 6321 @ optional op; may set condition codes 6322 bl fmodf @ r0<- op, r0-r3 changed 6323 GET_INST_OPCODE(ip) @ extract opcode from rINST 6324 SET_VREG(r0, r9) @ vAA<- r0 6325 GOTO_OPCODE(ip) @ jump to next instruction 6326 /* 10-13 instructions */ 6327 6328 6329 6330/* ------------------------------ */ 6331 .balign 64 6332.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */ 6333/* File: arm-vfp/OP_ADD_DOUBLE_2ADDR.S */ 6334/* File: arm-vfp/fbinopWide2addr.S */ 6335 /* 6336 * Generic 64-bit floating point "/2addr" binary operation. Provide 6337 * an "instr" line that specifies an instruction that performs 6338 * "d2 = d0 op d1". 6339 * 6340 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6341 * div-double/2addr 6342 */ 6343 /* binop/2addr vA, vB */ 6344 mov r3, rINST, lsr #12 @ r3<- B 6345 mov r9, rINST, lsr #8 @ r9<- A+ 6346 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6347 and r9, r9, #15 @ r9<- A 6348 fldd d1, [r3] @ d1<- vB 6349 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6350 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6351 fldd d0, [r9] @ d0<- vA 6352 6353 faddd d2, d0, d1 @ d2<- op 6354 GET_INST_OPCODE(ip) @ extract opcode from rINST 6355 fstd d2, [r9] @ vAA<- d2 6356 GOTO_OPCODE(ip) @ jump to next instruction 6357 6358 6359/* ------------------------------ */ 6360 .balign 64 6361.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */ 6362/* File: arm-vfp/OP_SUB_DOUBLE_2ADDR.S */ 6363/* File: arm-vfp/fbinopWide2addr.S */ 6364 /* 6365 * Generic 64-bit floating point "/2addr" binary operation. Provide 6366 * an "instr" line that specifies an instruction that performs 6367 * "d2 = d0 op d1". 6368 * 6369 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6370 * div-double/2addr 6371 */ 6372 /* binop/2addr vA, vB */ 6373 mov r3, rINST, lsr #12 @ r3<- B 6374 mov r9, rINST, lsr #8 @ r9<- A+ 6375 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6376 and r9, r9, #15 @ r9<- A 6377 fldd d1, [r3] @ d1<- vB 6378 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6379 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6380 fldd d0, [r9] @ d0<- vA 6381 6382 fsubd d2, d0, d1 @ d2<- op 6383 GET_INST_OPCODE(ip) @ extract opcode from rINST 6384 fstd d2, [r9] @ vAA<- d2 6385 GOTO_OPCODE(ip) @ jump to next instruction 6386 6387 6388/* ------------------------------ */ 6389 .balign 64 6390.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */ 6391/* File: arm-vfp/OP_MUL_DOUBLE_2ADDR.S */ 6392/* File: arm-vfp/fbinopWide2addr.S */ 6393 /* 6394 * Generic 64-bit floating point "/2addr" binary operation. Provide 6395 * an "instr" line that specifies an instruction that performs 6396 * "d2 = d0 op d1". 6397 * 6398 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6399 * div-double/2addr 6400 */ 6401 /* binop/2addr vA, vB */ 6402 mov r3, rINST, lsr #12 @ r3<- B 6403 mov r9, rINST, lsr #8 @ r9<- A+ 6404 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6405 and r9, r9, #15 @ r9<- A 6406 fldd d1, [r3] @ d1<- vB 6407 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6408 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6409 fldd d0, [r9] @ d0<- vA 6410 6411 fmuld d2, d0, d1 @ d2<- op 6412 GET_INST_OPCODE(ip) @ extract opcode from rINST 6413 fstd d2, [r9] @ vAA<- d2 6414 GOTO_OPCODE(ip) @ jump to next instruction 6415 6416 6417/* ------------------------------ */ 6418 .balign 64 6419.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */ 6420/* File: arm-vfp/OP_DIV_DOUBLE_2ADDR.S */ 6421/* File: arm-vfp/fbinopWide2addr.S */ 6422 /* 6423 * Generic 64-bit floating point "/2addr" binary operation. Provide 6424 * an "instr" line that specifies an instruction that performs 6425 * "d2 = d0 op d1". 6426 * 6427 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6428 * div-double/2addr 6429 */ 6430 /* binop/2addr vA, vB */ 6431 mov r3, rINST, lsr #12 @ r3<- B 6432 mov r9, rINST, lsr #8 @ r9<- A+ 6433 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6434 and r9, r9, #15 @ r9<- A 6435 fldd d1, [r3] @ d1<- vB 6436 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6437 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6438 fldd d0, [r9] @ d0<- vA 6439 6440 fdivd d2, d0, d1 @ d2<- op 6441 GET_INST_OPCODE(ip) @ extract opcode from rINST 6442 fstd d2, [r9] @ vAA<- d2 6443 GOTO_OPCODE(ip) @ jump to next instruction 6444 6445 6446/* ------------------------------ */ 6447 .balign 64 6448.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */ 6449/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */ 6450/* EABI doesn't define a double remainder function, but libm does */ 6451/* File: armv5te/binopWide2addr.S */ 6452 /* 6453 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6454 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6455 * This could be an ARM instruction or a function call. (If the result 6456 * comes back in a register other than r0, you can override "result".) 6457 * 6458 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6459 * vCC (r1). Useful for integer division and modulus. 6460 * 6461 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6462 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6463 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6464 * rem-double/2addr 6465 */ 6466 /* binop/2addr vA, vB */ 6467 mov r9, rINST, lsr #8 @ r9<- A+ 6468 mov r1, rINST, lsr #12 @ r1<- B 6469 and r9, r9, #15 6470 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6471 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6472 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6473 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6474 .if 0 6475 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6476 beq common_errDivideByZero 6477 .endif 6478 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6479 6480 @ optional op; may set condition codes 6481 bl fmod @ result<- op, r0-r3 changed 6482 GET_INST_OPCODE(ip) @ extract opcode from rINST 6483 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6484 GOTO_OPCODE(ip) @ jump to next instruction 6485 /* 12-15 instructions */ 6486 6487 6488 6489/* ------------------------------ */ 6490 .balign 64 6491.L_OP_ADD_INT_LIT16: /* 0xd0 */ 6492/* File: armv5te/OP_ADD_INT_LIT16.S */ 6493/* File: armv5te/binopLit16.S */ 6494 /* 6495 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6496 * that specifies an instruction that performs "result = r0 op r1". 6497 * This could be an ARM instruction or a function call. (If the result 6498 * comes back in a register other than r0, you can override "result".) 6499 * 6500 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6501 * vCC (r1). Useful for integer division and modulus. 6502 * 6503 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6504 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6505 */ 6506 /* binop/lit16 vA, vB, #+CCCC */ 6507 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6508 mov r2, rINST, lsr #12 @ r2<- B 6509 mov r9, rINST, lsr #8 @ r9<- A+ 6510 GET_VREG(r0, r2) @ r0<- vB 6511 and r9, r9, #15 6512 .if 0 6513 cmp r1, #0 @ is second operand zero? 6514 beq common_errDivideByZero 6515 .endif 6516 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6517 6518 add r0, r0, r1 @ r0<- op, r0-r3 changed 6519 GET_INST_OPCODE(ip) @ extract opcode from rINST 6520 SET_VREG(r0, r9) @ vAA<- r0 6521 GOTO_OPCODE(ip) @ jump to next instruction 6522 /* 10-13 instructions */ 6523 6524 6525 6526/* ------------------------------ */ 6527 .balign 64 6528.L_OP_RSUB_INT: /* 0xd1 */ 6529/* File: armv5te/OP_RSUB_INT.S */ 6530/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */ 6531/* File: armv5te/binopLit16.S */ 6532 /* 6533 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6534 * that specifies an instruction that performs "result = r0 op r1". 6535 * This could be an ARM instruction or a function call. (If the result 6536 * comes back in a register other than r0, you can override "result".) 6537 * 6538 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6539 * vCC (r1). Useful for integer division and modulus. 6540 * 6541 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6542 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6543 */ 6544 /* binop/lit16 vA, vB, #+CCCC */ 6545 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6546 mov r2, rINST, lsr #12 @ r2<- B 6547 mov r9, rINST, lsr #8 @ r9<- A+ 6548 GET_VREG(r0, r2) @ r0<- vB 6549 and r9, r9, #15 6550 .if 0 6551 cmp r1, #0 @ is second operand zero? 6552 beq common_errDivideByZero 6553 .endif 6554 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6555 6556 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6557 GET_INST_OPCODE(ip) @ extract opcode from rINST 6558 SET_VREG(r0, r9) @ vAA<- r0 6559 GOTO_OPCODE(ip) @ jump to next instruction 6560 /* 10-13 instructions */ 6561 6562 6563 6564/* ------------------------------ */ 6565 .balign 64 6566.L_OP_MUL_INT_LIT16: /* 0xd2 */ 6567/* File: armv5te/OP_MUL_INT_LIT16.S */ 6568/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6569/* File: armv5te/binopLit16.S */ 6570 /* 6571 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6572 * that specifies an instruction that performs "result = r0 op r1". 6573 * This could be an ARM instruction or a function call. (If the result 6574 * comes back in a register other than r0, you can override "result".) 6575 * 6576 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6577 * vCC (r1). Useful for integer division and modulus. 6578 * 6579 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6580 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6581 */ 6582 /* binop/lit16 vA, vB, #+CCCC */ 6583 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6584 mov r2, rINST, lsr #12 @ r2<- B 6585 mov r9, rINST, lsr #8 @ r9<- A+ 6586 GET_VREG(r0, r2) @ r0<- vB 6587 and r9, r9, #15 6588 .if 0 6589 cmp r1, #0 @ is second operand zero? 6590 beq common_errDivideByZero 6591 .endif 6592 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6593 6594 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6595 GET_INST_OPCODE(ip) @ extract opcode from rINST 6596 SET_VREG(r0, r9) @ vAA<- r0 6597 GOTO_OPCODE(ip) @ jump to next instruction 6598 /* 10-13 instructions */ 6599 6600 6601 6602/* ------------------------------ */ 6603 .balign 64 6604.L_OP_DIV_INT_LIT16: /* 0xd3 */ 6605/* File: armv5te/OP_DIV_INT_LIT16.S */ 6606/* File: armv5te/binopLit16.S */ 6607 /* 6608 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6609 * that specifies an instruction that performs "result = r0 op r1". 6610 * This could be an ARM instruction or a function call. (If the result 6611 * comes back in a register other than r0, you can override "result".) 6612 * 6613 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6614 * vCC (r1). Useful for integer division and modulus. 6615 * 6616 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6617 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6618 */ 6619 /* binop/lit16 vA, vB, #+CCCC */ 6620 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6621 mov r2, rINST, lsr #12 @ r2<- B 6622 mov r9, rINST, lsr #8 @ r9<- A+ 6623 GET_VREG(r0, r2) @ r0<- vB 6624 and r9, r9, #15 6625 .if 1 6626 cmp r1, #0 @ is second operand zero? 6627 beq common_errDivideByZero 6628 .endif 6629 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6630 6631 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6632 GET_INST_OPCODE(ip) @ extract opcode from rINST 6633 SET_VREG(r0, r9) @ vAA<- r0 6634 GOTO_OPCODE(ip) @ jump to next instruction 6635 /* 10-13 instructions */ 6636 6637 6638 6639/* ------------------------------ */ 6640 .balign 64 6641.L_OP_REM_INT_LIT16: /* 0xd4 */ 6642/* File: armv5te/OP_REM_INT_LIT16.S */ 6643/* idivmod returns quotient in r0 and remainder in r1 */ 6644/* File: armv5te/binopLit16.S */ 6645 /* 6646 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6647 * that specifies an instruction that performs "result = r0 op r1". 6648 * This could be an ARM instruction or a function call. (If the result 6649 * comes back in a register other than r0, you can override "result".) 6650 * 6651 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6652 * vCC (r1). Useful for integer division and modulus. 6653 * 6654 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6655 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6656 */ 6657 /* binop/lit16 vA, vB, #+CCCC */ 6658 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6659 mov r2, rINST, lsr #12 @ r2<- B 6660 mov r9, rINST, lsr #8 @ r9<- A+ 6661 GET_VREG(r0, r2) @ r0<- vB 6662 and r9, r9, #15 6663 .if 1 6664 cmp r1, #0 @ is second operand zero? 6665 beq common_errDivideByZero 6666 .endif 6667 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6668 6669 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6670 GET_INST_OPCODE(ip) @ extract opcode from rINST 6671 SET_VREG(r1, r9) @ vAA<- r1 6672 GOTO_OPCODE(ip) @ jump to next instruction 6673 /* 10-13 instructions */ 6674 6675 6676 6677/* ------------------------------ */ 6678 .balign 64 6679.L_OP_AND_INT_LIT16: /* 0xd5 */ 6680/* File: armv5te/OP_AND_INT_LIT16.S */ 6681/* File: armv5te/binopLit16.S */ 6682 /* 6683 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6684 * that specifies an instruction that performs "result = r0 op r1". 6685 * This could be an ARM instruction or a function call. (If the result 6686 * comes back in a register other than r0, you can override "result".) 6687 * 6688 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6689 * vCC (r1). Useful for integer division and modulus. 6690 * 6691 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6692 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6693 */ 6694 /* binop/lit16 vA, vB, #+CCCC */ 6695 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6696 mov r2, rINST, lsr #12 @ r2<- B 6697 mov r9, rINST, lsr #8 @ r9<- A+ 6698 GET_VREG(r0, r2) @ r0<- vB 6699 and r9, r9, #15 6700 .if 0 6701 cmp r1, #0 @ is second operand zero? 6702 beq common_errDivideByZero 6703 .endif 6704 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6705 6706 and r0, r0, r1 @ r0<- op, r0-r3 changed 6707 GET_INST_OPCODE(ip) @ extract opcode from rINST 6708 SET_VREG(r0, r9) @ vAA<- r0 6709 GOTO_OPCODE(ip) @ jump to next instruction 6710 /* 10-13 instructions */ 6711 6712 6713 6714/* ------------------------------ */ 6715 .balign 64 6716.L_OP_OR_INT_LIT16: /* 0xd6 */ 6717/* File: armv5te/OP_OR_INT_LIT16.S */ 6718/* File: armv5te/binopLit16.S */ 6719 /* 6720 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6721 * that specifies an instruction that performs "result = r0 op r1". 6722 * This could be an ARM instruction or a function call. (If the result 6723 * comes back in a register other than r0, you can override "result".) 6724 * 6725 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6726 * vCC (r1). Useful for integer division and modulus. 6727 * 6728 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6729 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6730 */ 6731 /* binop/lit16 vA, vB, #+CCCC */ 6732 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6733 mov r2, rINST, lsr #12 @ r2<- B 6734 mov r9, rINST, lsr #8 @ r9<- A+ 6735 GET_VREG(r0, r2) @ r0<- vB 6736 and r9, r9, #15 6737 .if 0 6738 cmp r1, #0 @ is second operand zero? 6739 beq common_errDivideByZero 6740 .endif 6741 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6742 6743 orr r0, r0, r1 @ r0<- op, r0-r3 changed 6744 GET_INST_OPCODE(ip) @ extract opcode from rINST 6745 SET_VREG(r0, r9) @ vAA<- r0 6746 GOTO_OPCODE(ip) @ jump to next instruction 6747 /* 10-13 instructions */ 6748 6749 6750 6751/* ------------------------------ */ 6752 .balign 64 6753.L_OP_XOR_INT_LIT16: /* 0xd7 */ 6754/* File: armv5te/OP_XOR_INT_LIT16.S */ 6755/* File: armv5te/binopLit16.S */ 6756 /* 6757 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6758 * that specifies an instruction that performs "result = r0 op r1". 6759 * This could be an ARM instruction or a function call. (If the result 6760 * comes back in a register other than r0, you can override "result".) 6761 * 6762 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6763 * vCC (r1). Useful for integer division and modulus. 6764 * 6765 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6766 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6767 */ 6768 /* binop/lit16 vA, vB, #+CCCC */ 6769 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6770 mov r2, rINST, lsr #12 @ r2<- B 6771 mov r9, rINST, lsr #8 @ r9<- A+ 6772 GET_VREG(r0, r2) @ r0<- vB 6773 and r9, r9, #15 6774 .if 0 6775 cmp r1, #0 @ is second operand zero? 6776 beq common_errDivideByZero 6777 .endif 6778 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6779 6780 eor r0, r0, r1 @ r0<- op, r0-r3 changed 6781 GET_INST_OPCODE(ip) @ extract opcode from rINST 6782 SET_VREG(r0, r9) @ vAA<- r0 6783 GOTO_OPCODE(ip) @ jump to next instruction 6784 /* 10-13 instructions */ 6785 6786 6787 6788/* ------------------------------ */ 6789 .balign 64 6790.L_OP_ADD_INT_LIT8: /* 0xd8 */ 6791/* File: armv5te/OP_ADD_INT_LIT8.S */ 6792/* File: armv5te/binopLit8.S */ 6793 /* 6794 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6795 * that specifies an instruction that performs "result = r0 op r1". 6796 * This could be an ARM instruction or a function call. (If the result 6797 * comes back in a register other than r0, you can override "result".) 6798 * 6799 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6800 * vCC (r1). Useful for integer division and modulus. 6801 * 6802 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6803 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6804 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6805 */ 6806 /* binop/lit8 vAA, vBB, #+CC */ 6807 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6808 mov r9, rINST, lsr #8 @ r9<- AA 6809 and r2, r3, #255 @ r2<- BB 6810 GET_VREG(r0, r2) @ r0<- vBB 6811 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6812 .if 0 6813 @cmp r1, #0 @ is second operand zero? 6814 beq common_errDivideByZero 6815 .endif 6816 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6817 6818 @ optional op; may set condition codes 6819 add r0, r0, r1 @ r0<- op, r0-r3 changed 6820 GET_INST_OPCODE(ip) @ extract opcode from rINST 6821 SET_VREG(r0, r9) @ vAA<- r0 6822 GOTO_OPCODE(ip) @ jump to next instruction 6823 /* 10-12 instructions */ 6824 6825 6826 6827/* ------------------------------ */ 6828 .balign 64 6829.L_OP_RSUB_INT_LIT8: /* 0xd9 */ 6830/* File: armv5te/OP_RSUB_INT_LIT8.S */ 6831/* File: armv5te/binopLit8.S */ 6832 /* 6833 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6834 * that specifies an instruction that performs "result = r0 op r1". 6835 * This could be an ARM instruction or a function call. (If the result 6836 * comes back in a register other than r0, you can override "result".) 6837 * 6838 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6839 * vCC (r1). Useful for integer division and modulus. 6840 * 6841 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6842 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6843 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6844 */ 6845 /* binop/lit8 vAA, vBB, #+CC */ 6846 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6847 mov r9, rINST, lsr #8 @ r9<- AA 6848 and r2, r3, #255 @ r2<- BB 6849 GET_VREG(r0, r2) @ r0<- vBB 6850 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6851 .if 0 6852 @cmp r1, #0 @ is second operand zero? 6853 beq common_errDivideByZero 6854 .endif 6855 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6856 6857 @ optional op; may set condition codes 6858 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6859 GET_INST_OPCODE(ip) @ extract opcode from rINST 6860 SET_VREG(r0, r9) @ vAA<- r0 6861 GOTO_OPCODE(ip) @ jump to next instruction 6862 /* 10-12 instructions */ 6863 6864 6865 6866/* ------------------------------ */ 6867 .balign 64 6868.L_OP_MUL_INT_LIT8: /* 0xda */ 6869/* File: armv5te/OP_MUL_INT_LIT8.S */ 6870/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6871/* File: armv5te/binopLit8.S */ 6872 /* 6873 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6874 * that specifies an instruction that performs "result = r0 op r1". 6875 * This could be an ARM instruction or a function call. (If the result 6876 * comes back in a register other than r0, you can override "result".) 6877 * 6878 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6879 * vCC (r1). Useful for integer division and modulus. 6880 * 6881 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6882 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6883 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6884 */ 6885 /* binop/lit8 vAA, vBB, #+CC */ 6886 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6887 mov r9, rINST, lsr #8 @ r9<- AA 6888 and r2, r3, #255 @ r2<- BB 6889 GET_VREG(r0, r2) @ r0<- vBB 6890 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6891 .if 0 6892 @cmp r1, #0 @ is second operand zero? 6893 beq common_errDivideByZero 6894 .endif 6895 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6896 6897 @ optional op; may set condition codes 6898 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6899 GET_INST_OPCODE(ip) @ extract opcode from rINST 6900 SET_VREG(r0, r9) @ vAA<- r0 6901 GOTO_OPCODE(ip) @ jump to next instruction 6902 /* 10-12 instructions */ 6903 6904 6905 6906/* ------------------------------ */ 6907 .balign 64 6908.L_OP_DIV_INT_LIT8: /* 0xdb */ 6909/* File: armv5te/OP_DIV_INT_LIT8.S */ 6910/* File: armv5te/binopLit8.S */ 6911 /* 6912 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6913 * that specifies an instruction that performs "result = r0 op r1". 6914 * This could be an ARM instruction or a function call. (If the result 6915 * comes back in a register other than r0, you can override "result".) 6916 * 6917 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6918 * vCC (r1). Useful for integer division and modulus. 6919 * 6920 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6921 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6922 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6923 */ 6924 /* binop/lit8 vAA, vBB, #+CC */ 6925 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6926 mov r9, rINST, lsr #8 @ r9<- AA 6927 and r2, r3, #255 @ r2<- BB 6928 GET_VREG(r0, r2) @ r0<- vBB 6929 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6930 .if 1 6931 @cmp r1, #0 @ is second operand zero? 6932 beq common_errDivideByZero 6933 .endif 6934 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6935 6936 @ optional op; may set condition codes 6937 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6938 GET_INST_OPCODE(ip) @ extract opcode from rINST 6939 SET_VREG(r0, r9) @ vAA<- r0 6940 GOTO_OPCODE(ip) @ jump to next instruction 6941 /* 10-12 instructions */ 6942 6943 6944 6945/* ------------------------------ */ 6946 .balign 64 6947.L_OP_REM_INT_LIT8: /* 0xdc */ 6948/* File: armv5te/OP_REM_INT_LIT8.S */ 6949/* idivmod returns quotient in r0 and remainder in r1 */ 6950/* File: armv5te/binopLit8.S */ 6951 /* 6952 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6953 * that specifies an instruction that performs "result = r0 op r1". 6954 * This could be an ARM instruction or a function call. (If the result 6955 * comes back in a register other than r0, you can override "result".) 6956 * 6957 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6958 * vCC (r1). Useful for integer division and modulus. 6959 * 6960 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6961 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6962 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6963 */ 6964 /* binop/lit8 vAA, vBB, #+CC */ 6965 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6966 mov r9, rINST, lsr #8 @ r9<- AA 6967 and r2, r3, #255 @ r2<- BB 6968 GET_VREG(r0, r2) @ r0<- vBB 6969 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6970 .if 1 6971 @cmp r1, #0 @ is second operand zero? 6972 beq common_errDivideByZero 6973 .endif 6974 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6975 6976 @ optional op; may set condition codes 6977 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6978 GET_INST_OPCODE(ip) @ extract opcode from rINST 6979 SET_VREG(r1, r9) @ vAA<- r1 6980 GOTO_OPCODE(ip) @ jump to next instruction 6981 /* 10-12 instructions */ 6982 6983 6984 6985/* ------------------------------ */ 6986 .balign 64 6987.L_OP_AND_INT_LIT8: /* 0xdd */ 6988/* File: armv5te/OP_AND_INT_LIT8.S */ 6989/* File: armv5te/binopLit8.S */ 6990 /* 6991 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6992 * that specifies an instruction that performs "result = r0 op r1". 6993 * This could be an ARM instruction or a function call. (If the result 6994 * comes back in a register other than r0, you can override "result".) 6995 * 6996 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6997 * vCC (r1). Useful for integer division and modulus. 6998 * 6999 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7000 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7001 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7002 */ 7003 /* binop/lit8 vAA, vBB, #+CC */ 7004 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7005 mov r9, rINST, lsr #8 @ r9<- AA 7006 and r2, r3, #255 @ r2<- BB 7007 GET_VREG(r0, r2) @ r0<- vBB 7008 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7009 .if 0 7010 @cmp r1, #0 @ is second operand zero? 7011 beq common_errDivideByZero 7012 .endif 7013 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7014 7015 @ optional op; may set condition codes 7016 and r0, r0, r1 @ r0<- op, r0-r3 changed 7017 GET_INST_OPCODE(ip) @ extract opcode from rINST 7018 SET_VREG(r0, r9) @ vAA<- r0 7019 GOTO_OPCODE(ip) @ jump to next instruction 7020 /* 10-12 instructions */ 7021 7022 7023 7024/* ------------------------------ */ 7025 .balign 64 7026.L_OP_OR_INT_LIT8: /* 0xde */ 7027/* File: armv5te/OP_OR_INT_LIT8.S */ 7028/* File: armv5te/binopLit8.S */ 7029 /* 7030 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7031 * that specifies an instruction that performs "result = r0 op r1". 7032 * This could be an ARM instruction or a function call. (If the result 7033 * comes back in a register other than r0, you can override "result".) 7034 * 7035 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7036 * vCC (r1). Useful for integer division and modulus. 7037 * 7038 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7039 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7040 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7041 */ 7042 /* binop/lit8 vAA, vBB, #+CC */ 7043 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7044 mov r9, rINST, lsr #8 @ r9<- AA 7045 and r2, r3, #255 @ r2<- BB 7046 GET_VREG(r0, r2) @ r0<- vBB 7047 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7048 .if 0 7049 @cmp r1, #0 @ is second operand zero? 7050 beq common_errDivideByZero 7051 .endif 7052 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7053 7054 @ optional op; may set condition codes 7055 orr r0, r0, r1 @ r0<- op, r0-r3 changed 7056 GET_INST_OPCODE(ip) @ extract opcode from rINST 7057 SET_VREG(r0, r9) @ vAA<- r0 7058 GOTO_OPCODE(ip) @ jump to next instruction 7059 /* 10-12 instructions */ 7060 7061 7062 7063/* ------------------------------ */ 7064 .balign 64 7065.L_OP_XOR_INT_LIT8: /* 0xdf */ 7066/* File: armv5te/OP_XOR_INT_LIT8.S */ 7067/* File: armv5te/binopLit8.S */ 7068 /* 7069 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7070 * that specifies an instruction that performs "result = r0 op r1". 7071 * This could be an ARM instruction or a function call. (If the result 7072 * comes back in a register other than r0, you can override "result".) 7073 * 7074 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7075 * vCC (r1). Useful for integer division and modulus. 7076 * 7077 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7078 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7079 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7080 */ 7081 /* binop/lit8 vAA, vBB, #+CC */ 7082 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7083 mov r9, rINST, lsr #8 @ r9<- AA 7084 and r2, r3, #255 @ r2<- BB 7085 GET_VREG(r0, r2) @ r0<- vBB 7086 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7087 .if 0 7088 @cmp r1, #0 @ is second operand zero? 7089 beq common_errDivideByZero 7090 .endif 7091 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7092 7093 @ optional op; may set condition codes 7094 eor r0, r0, r1 @ r0<- op, r0-r3 changed 7095 GET_INST_OPCODE(ip) @ extract opcode from rINST 7096 SET_VREG(r0, r9) @ vAA<- r0 7097 GOTO_OPCODE(ip) @ jump to next instruction 7098 /* 10-12 instructions */ 7099 7100 7101 7102/* ------------------------------ */ 7103 .balign 64 7104.L_OP_SHL_INT_LIT8: /* 0xe0 */ 7105/* File: armv5te/OP_SHL_INT_LIT8.S */ 7106/* File: armv5te/binopLit8.S */ 7107 /* 7108 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7109 * that specifies an instruction that performs "result = r0 op r1". 7110 * This could be an ARM instruction or a function call. (If the result 7111 * comes back in a register other than r0, you can override "result".) 7112 * 7113 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7114 * vCC (r1). Useful for integer division and modulus. 7115 * 7116 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7117 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7118 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7119 */ 7120 /* binop/lit8 vAA, vBB, #+CC */ 7121 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7122 mov r9, rINST, lsr #8 @ r9<- AA 7123 and r2, r3, #255 @ r2<- BB 7124 GET_VREG(r0, r2) @ r0<- vBB 7125 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7126 .if 0 7127 @cmp r1, #0 @ is second operand zero? 7128 beq common_errDivideByZero 7129 .endif 7130 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7131 7132 and r1, r1, #31 @ optional op; may set condition codes 7133 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 7134 GET_INST_OPCODE(ip) @ extract opcode from rINST 7135 SET_VREG(r0, r9) @ vAA<- r0 7136 GOTO_OPCODE(ip) @ jump to next instruction 7137 /* 10-12 instructions */ 7138 7139 7140 7141/* ------------------------------ */ 7142 .balign 64 7143.L_OP_SHR_INT_LIT8: /* 0xe1 */ 7144/* File: armv5te/OP_SHR_INT_LIT8.S */ 7145/* File: armv5te/binopLit8.S */ 7146 /* 7147 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7148 * that specifies an instruction that performs "result = r0 op r1". 7149 * This could be an ARM instruction or a function call. (If the result 7150 * comes back in a register other than r0, you can override "result".) 7151 * 7152 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7153 * vCC (r1). Useful for integer division and modulus. 7154 * 7155 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7156 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7157 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7158 */ 7159 /* binop/lit8 vAA, vBB, #+CC */ 7160 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7161 mov r9, rINST, lsr #8 @ r9<- AA 7162 and r2, r3, #255 @ r2<- BB 7163 GET_VREG(r0, r2) @ r0<- vBB 7164 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7165 .if 0 7166 @cmp r1, #0 @ is second operand zero? 7167 beq common_errDivideByZero 7168 .endif 7169 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7170 7171 and r1, r1, #31 @ optional op; may set condition codes 7172 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 7173 GET_INST_OPCODE(ip) @ extract opcode from rINST 7174 SET_VREG(r0, r9) @ vAA<- r0 7175 GOTO_OPCODE(ip) @ jump to next instruction 7176 /* 10-12 instructions */ 7177 7178 7179 7180/* ------------------------------ */ 7181 .balign 64 7182.L_OP_USHR_INT_LIT8: /* 0xe2 */ 7183/* File: armv5te/OP_USHR_INT_LIT8.S */ 7184/* File: armv5te/binopLit8.S */ 7185 /* 7186 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7187 * that specifies an instruction that performs "result = r0 op r1". 7188 * This could be an ARM instruction or a function call. (If the result 7189 * comes back in a register other than r0, you can override "result".) 7190 * 7191 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7192 * vCC (r1). Useful for integer division and modulus. 7193 * 7194 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7195 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7196 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7197 */ 7198 /* binop/lit8 vAA, vBB, #+CC */ 7199 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7200 mov r9, rINST, lsr #8 @ r9<- AA 7201 and r2, r3, #255 @ r2<- BB 7202 GET_VREG(r0, r2) @ r0<- vBB 7203 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7204 .if 0 7205 @cmp r1, #0 @ is second operand zero? 7206 beq common_errDivideByZero 7207 .endif 7208 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7209 7210 and r1, r1, #31 @ optional op; may set condition codes 7211 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 7212 GET_INST_OPCODE(ip) @ extract opcode from rINST 7213 SET_VREG(r0, r9) @ vAA<- r0 7214 GOTO_OPCODE(ip) @ jump to next instruction 7215 /* 10-12 instructions */ 7216 7217 7218 7219/* ------------------------------ */ 7220 .balign 64 7221.L_OP_UNUSED_E3: /* 0xe3 */ 7222/* File: armv5te/OP_UNUSED_E3.S */ 7223/* File: armv5te/unused.S */ 7224 bl common_abort 7225 7226 7227 7228/* ------------------------------ */ 7229 .balign 64 7230.L_OP_UNUSED_E4: /* 0xe4 */ 7231/* File: armv5te/OP_UNUSED_E4.S */ 7232/* File: armv5te/unused.S */ 7233 bl common_abort 7234 7235 7236 7237/* ------------------------------ */ 7238 .balign 64 7239.L_OP_UNUSED_E5: /* 0xe5 */ 7240/* File: armv5te/OP_UNUSED_E5.S */ 7241/* File: armv5te/unused.S */ 7242 bl common_abort 7243 7244 7245 7246/* ------------------------------ */ 7247 .balign 64 7248.L_OP_UNUSED_E6: /* 0xe6 */ 7249/* File: armv5te/OP_UNUSED_E6.S */ 7250/* File: armv5te/unused.S */ 7251 bl common_abort 7252 7253 7254 7255/* ------------------------------ */ 7256 .balign 64 7257.L_OP_UNUSED_E7: /* 0xe7 */ 7258/* File: armv5te/OP_UNUSED_E7.S */ 7259/* File: armv5te/unused.S */ 7260 bl common_abort 7261 7262 7263 7264/* ------------------------------ */ 7265 .balign 64 7266.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */ 7267/* File: armv5te/OP_IGET_WIDE_VOLATILE.S */ 7268/* File: armv5te/OP_IGET_WIDE.S */ 7269 /* 7270 * Wide 32-bit instance field get. 7271 */ 7272 /* iget-wide vA, vB, field@CCCC */ 7273 mov r0, rINST, lsr #12 @ r0<- B 7274 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7275 FETCH(r1, 1) @ r1<- field ref CCCC 7276 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 7277 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7278 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7279 cmp r0, #0 @ is resolved entry null? 7280 bne .LOP_IGET_WIDE_VOLATILE_finish @ no, already resolved 72818: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7282 EXPORT_PC() @ resolve() could throw 7283 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7284 bl dvmResolveInstField @ r0<- resolved InstField ptr 7285 cmp r0, #0 7286 bne .LOP_IGET_WIDE_VOLATILE_finish 7287 b common_exceptionThrown 7288 7289 7290/* ------------------------------ */ 7291 .balign 64 7292.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */ 7293/* File: armv5te/OP_IPUT_WIDE_VOLATILE.S */ 7294/* File: armv5te/OP_IPUT_WIDE.S */ 7295 /* iput-wide vA, vB, field@CCCC */ 7296 mov r0, rINST, lsr #12 @ r0<- B 7297 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 7298 FETCH(r1, 1) @ r1<- field ref CCCC 7299 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 7300 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 7301 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 7302 cmp r0, #0 @ is resolved entry null? 7303 bne .LOP_IPUT_WIDE_VOLATILE_finish @ no, already resolved 73048: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7305 EXPORT_PC() @ resolve() could throw 7306 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 7307 bl dvmResolveInstField @ r0<- resolved InstField ptr 7308 cmp r0, #0 @ success? 7309 bne .LOP_IPUT_WIDE_VOLATILE_finish @ yes, finish up 7310 b common_exceptionThrown 7311 7312 7313/* ------------------------------ */ 7314 .balign 64 7315.L_OP_SGET_WIDE_VOLATILE: /* 0xea */ 7316/* File: armv5te/OP_SGET_WIDE_VOLATILE.S */ 7317/* File: armv5te/OP_SGET_WIDE.S */ 7318 /* 7319 * 64-bit SGET handler. 7320 */ 7321 /* sget-wide vAA, field@BBBB */ 7322 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 7323 FETCH(r1, 1) @ r1<- field ref BBBB 7324 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 7325 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 7326 cmp r0, #0 @ is resolved entry null? 7327 beq .LOP_SGET_WIDE_VOLATILE_resolve @ yes, do resolve 7328.LOP_SGET_WIDE_VOLATILE_finish: 7329 mov r9, rINST, lsr #8 @ r9<- AA 7330 .if 1 7331 add r0, r0, #offStaticField_value @ r0<- pointer to data 7332 bl android_quasiatomic_read_64 @ r0/r1<- contents of field 7333 .else 7334 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 7335 .endif 7336 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 7337 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7338 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 7339 GET_INST_OPCODE(ip) @ extract opcode from rINST 7340 GOTO_OPCODE(ip) @ jump to next instruction 7341 7342 7343/* ------------------------------ */ 7344 .balign 64 7345.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */ 7346/* File: armv5te/OP_SPUT_WIDE_VOLATILE.S */ 7347/* File: armv5te/OP_SPUT_WIDE.S */ 7348 /* 7349 * 64-bit SPUT handler. 7350 */ 7351 /* sput-wide vAA, field@BBBB */ 7352 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 7353 FETCH(r1, 1) @ r1<- field ref BBBB 7354 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 7355 mov r9, rINST, lsr #8 @ r9<- AA 7356 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 7357 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 7358 cmp r2, #0 @ is resolved entry null? 7359 beq .LOP_SPUT_WIDE_VOLATILE_resolve @ yes, do resolve 7360.LOP_SPUT_WIDE_VOLATILE_finish: @ field ptr in r2, AA in r9 7361 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7362 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 7363 GET_INST_OPCODE(r10) @ extract opcode from rINST 7364 .if 1 7365 add r2, r2, #offStaticField_value @ r2<- pointer to data 7366 bl android_quasiatomic_swap_64 @ stores r0/r1 into addr r2 7367 .else 7368 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 7369 .endif 7370 GOTO_OPCODE(r10) @ jump to next instruction 7371 7372 7373/* ------------------------------ */ 7374 .balign 64 7375.L_OP_BREAKPOINT: /* 0xec */ 7376/* File: armv5te/OP_BREAKPOINT.S */ 7377/* File: armv5te/unused.S */ 7378 bl common_abort 7379 7380 7381 7382/* ------------------------------ */ 7383 .balign 64 7384.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */ 7385/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */ 7386 /* 7387 * Handle a throw-verification-error instruction. This throws an 7388 * exception for an error discovered during verification. The 7389 * exception is indicated by AA, with some detail provided by BBBB. 7390 */ 7391 /* op AA, ref@BBBB */ 7392 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7393 FETCH(r2, 1) @ r2<- BBBB 7394 EXPORT_PC() @ export the PC 7395 mov r1, rINST, lsr #8 @ r1<- AA 7396 bl dvmThrowVerificationError @ always throws 7397 b common_exceptionThrown @ handle exception 7398 7399 7400/* ------------------------------ */ 7401 .balign 64 7402.L_OP_EXECUTE_INLINE: /* 0xee */ 7403/* File: armv5te/OP_EXECUTE_INLINE.S */ 7404 /* 7405 * Execute a "native inline" instruction. 7406 * 7407 * We need to call an InlineOp4Func: 7408 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7409 * 7410 * The first four args are in r0-r3, pointer to return value storage 7411 * is on the stack. The function's return value is a flag that tells 7412 * us if an exception was thrown. 7413 */ 7414 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */ 7415 FETCH(r10, 1) @ r10<- BBBB 7416 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7417 EXPORT_PC() @ can throw 7418 sub sp, sp, #8 @ make room for arg, +64 bit align 7419 mov r0, rINST, lsr #12 @ r0<- B 7420 str r1, [sp] @ push &glue->retval 7421 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after 7422 add sp, sp, #8 @ pop stack 7423 cmp r0, #0 @ test boolean result of inline 7424 beq common_exceptionThrown @ returned false, handle exception 7425 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7426 GET_INST_OPCODE(ip) @ extract opcode from rINST 7427 GOTO_OPCODE(ip) @ jump to next instruction 7428 7429/* ------------------------------ */ 7430 .balign 64 7431.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */ 7432/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */ 7433 /* 7434 * Execute a "native inline" instruction, using "/range" semantics. 7435 * Same idea as execute-inline, but we get the args differently. 7436 * 7437 * We need to call an InlineOp4Func: 7438 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7439 * 7440 * The first four args are in r0-r3, pointer to return value storage 7441 * is on the stack. The function's return value is a flag that tells 7442 * us if an exception was thrown. 7443 */ 7444 /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */ 7445 FETCH(r10, 1) @ r10<- BBBB 7446 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7447 EXPORT_PC() @ can throw 7448 sub sp, sp, #8 @ make room for arg, +64 bit align 7449 mov r0, rINST, lsr #8 @ r0<- AA 7450 str r1, [sp] @ push &glue->retval 7451 bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after 7452 add sp, sp, #8 @ pop stack 7453 cmp r0, #0 @ test boolean result of inline 7454 beq common_exceptionThrown @ returned false, handle exception 7455 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7456 GET_INST_OPCODE(ip) @ extract opcode from rINST 7457 GOTO_OPCODE(ip) @ jump to next instruction 7458 7459/* ------------------------------ */ 7460 .balign 64 7461.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7462/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7463 /* 7464 * invoke-direct-empty is a no-op in a "standard" interpreter. 7465 */ 7466 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7467 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7468 GOTO_OPCODE(ip) @ execute it 7469 7470/* ------------------------------ */ 7471 .balign 64 7472.L_OP_UNUSED_F1: /* 0xf1 */ 7473/* File: armv5te/OP_UNUSED_F1.S */ 7474/* File: armv5te/unused.S */ 7475 bl common_abort 7476 7477 7478 7479/* ------------------------------ */ 7480 .balign 64 7481.L_OP_IGET_QUICK: /* 0xf2 */ 7482/* File: armv5te/OP_IGET_QUICK.S */ 7483 /* For: iget-quick, iget-object-quick */ 7484 /* op vA, vB, offset@CCCC */ 7485 mov r2, rINST, lsr #12 @ r2<- B 7486 GET_VREG(r3, r2) @ r3<- object we're operating on 7487 FETCH(r1, 1) @ r1<- field byte offset 7488 cmp r3, #0 @ check object for null 7489 mov r2, rINST, lsr #8 @ r2<- A(+) 7490 beq common_errNullObject @ object was null 7491 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7492 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7493 and r2, r2, #15 7494 GET_INST_OPCODE(ip) @ extract opcode from rINST 7495 SET_VREG(r0, r2) @ fp[A]<- r0 7496 GOTO_OPCODE(ip) @ jump to next instruction 7497 7498 7499/* ------------------------------ */ 7500 .balign 64 7501.L_OP_IGET_WIDE_QUICK: /* 0xf3 */ 7502/* File: armv5te/OP_IGET_WIDE_QUICK.S */ 7503 /* iget-wide-quick vA, vB, offset@CCCC */ 7504 mov r2, rINST, lsr #12 @ r2<- B 7505 GET_VREG(r3, r2) @ r3<- object we're operating on 7506 FETCH(r1, 1) @ r1<- field byte offset 7507 cmp r3, #0 @ check object for null 7508 mov r2, rINST, lsr #8 @ r2<- A(+) 7509 beq common_errNullObject @ object was null 7510 ldrd r0, [r3, r1] @ r0<- obj.field (64 bits, aligned) 7511 and r2, r2, #15 7512 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7513 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 7514 GET_INST_OPCODE(ip) @ extract opcode from rINST 7515 stmia r3, {r0-r1} @ fp[A]<- r0/r1 7516 GOTO_OPCODE(ip) @ jump to next instruction 7517 7518 7519/* ------------------------------ */ 7520 .balign 64 7521.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */ 7522/* File: armv5te/OP_IGET_OBJECT_QUICK.S */ 7523/* File: armv5te/OP_IGET_QUICK.S */ 7524 /* For: iget-quick, iget-object-quick */ 7525 /* op vA, vB, offset@CCCC */ 7526 mov r2, rINST, lsr #12 @ r2<- B 7527 GET_VREG(r3, r2) @ r3<- object we're operating on 7528 FETCH(r1, 1) @ r1<- field byte offset 7529 cmp r3, #0 @ check object for null 7530 mov r2, rINST, lsr #8 @ r2<- A(+) 7531 beq common_errNullObject @ object was null 7532 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7533 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7534 and r2, r2, #15 7535 GET_INST_OPCODE(ip) @ extract opcode from rINST 7536 SET_VREG(r0, r2) @ fp[A]<- r0 7537 GOTO_OPCODE(ip) @ jump to next instruction 7538 7539 7540 7541/* ------------------------------ */ 7542 .balign 64 7543.L_OP_IPUT_QUICK: /* 0xf5 */ 7544/* File: armv5te/OP_IPUT_QUICK.S */ 7545 /* For: iput-quick, iput-object-quick */ 7546 /* op vA, vB, offset@CCCC */ 7547 mov r2, rINST, lsr #12 @ r2<- B 7548 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7549 FETCH(r1, 1) @ r1<- field byte offset 7550 cmp r3, #0 @ check object for null 7551 mov r2, rINST, lsr #8 @ r2<- A(+) 7552 beq common_errNullObject @ object was null 7553 and r2, r2, #15 7554 GET_VREG(r0, r2) @ r0<- fp[A] 7555 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7556 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7557 GET_INST_OPCODE(ip) @ extract opcode from rINST 7558 GOTO_OPCODE(ip) @ jump to next instruction 7559 7560 7561/* ------------------------------ */ 7562 .balign 64 7563.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7564/* File: armv5te/OP_IPUT_WIDE_QUICK.S */ 7565 /* iput-wide-quick vA, vB, offset@CCCC */ 7566 mov r0, rINST, lsr #8 @ r0<- A(+) 7567 mov r1, rINST, lsr #12 @ r1<- B 7568 and r0, r0, #15 7569 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7570 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7571 cmp r2, #0 @ check object for null 7572 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7573 beq common_errNullObject @ object was null 7574 FETCH(r3, 1) @ r3<- field byte offset 7575 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7576 strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1 7577 GET_INST_OPCODE(ip) @ extract opcode from rINST 7578 GOTO_OPCODE(ip) @ jump to next instruction 7579 7580 7581/* ------------------------------ */ 7582 .balign 64 7583.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7584/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7585/* File: armv5te/OP_IPUT_QUICK.S */ 7586 /* For: iput-quick, iput-object-quick */ 7587 /* op vA, vB, offset@CCCC */ 7588 mov r2, rINST, lsr #12 @ r2<- B 7589 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7590 FETCH(r1, 1) @ r1<- field byte offset 7591 cmp r3, #0 @ check object for null 7592 mov r2, rINST, lsr #8 @ r2<- A(+) 7593 beq common_errNullObject @ object was null 7594 and r2, r2, #15 7595 GET_VREG(r0, r2) @ r0<- fp[A] 7596 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7597 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7598 GET_INST_OPCODE(ip) @ extract opcode from rINST 7599 GOTO_OPCODE(ip) @ jump to next instruction 7600 7601 7602 7603/* ------------------------------ */ 7604 .balign 64 7605.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */ 7606/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7607 /* 7608 * Handle an optimized virtual method call. 7609 * 7610 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7611 */ 7612 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7613 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7614 FETCH(r3, 2) @ r3<- FEDC or CCCC 7615 FETCH(r1, 1) @ r1<- BBBB 7616 .if (!0) 7617 and r3, r3, #15 @ r3<- C (or stays CCCC) 7618 .endif 7619 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7620 cmp r2, #0 @ is "this" null? 7621 beq common_errNullObject @ null "this", throw exception 7622 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7623 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7624 EXPORT_PC() @ invoke must export 7625 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7626 bl common_invokeMethodNoRange @ continue on 7627 7628/* ------------------------------ */ 7629 .balign 64 7630.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */ 7631/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */ 7632/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7633 /* 7634 * Handle an optimized virtual method call. 7635 * 7636 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7637 */ 7638 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7639 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7640 FETCH(r3, 2) @ r3<- FEDC or CCCC 7641 FETCH(r1, 1) @ r1<- BBBB 7642 .if (!1) 7643 and r3, r3, #15 @ r3<- C (or stays CCCC) 7644 .endif 7645 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7646 cmp r2, #0 @ is "this" null? 7647 beq common_errNullObject @ null "this", throw exception 7648 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7649 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7650 EXPORT_PC() @ invoke must export 7651 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7652 bl common_invokeMethodRange @ continue on 7653 7654 7655/* ------------------------------ */ 7656 .balign 64 7657.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */ 7658/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7659 /* 7660 * Handle an optimized "super" method call. 7661 * 7662 * for: [opt] invoke-super-quick, invoke-super-quick/range 7663 */ 7664 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7665 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7666 FETCH(r10, 2) @ r10<- GFED or CCCC 7667 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7668 .if (!0) 7669 and r10, r10, #15 @ r10<- D (or stays CCCC) 7670 .endif 7671 FETCH(r1, 1) @ r1<- BBBB 7672 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7673 EXPORT_PC() @ must export for invoke 7674 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7675 GET_VREG(r3, r10) @ r3<- "this" 7676 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7677 cmp r3, #0 @ null "this" ref? 7678 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7679 beq common_errNullObject @ "this" is null, throw exception 7680 bl common_invokeMethodNoRange @ continue on 7681 7682 7683/* ------------------------------ */ 7684 .balign 64 7685.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */ 7686/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */ 7687/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7688 /* 7689 * Handle an optimized "super" method call. 7690 * 7691 * for: [opt] invoke-super-quick, invoke-super-quick/range 7692 */ 7693 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7694 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7695 FETCH(r10, 2) @ r10<- GFED or CCCC 7696 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7697 .if (!1) 7698 and r10, r10, #15 @ r10<- D (or stays CCCC) 7699 .endif 7700 FETCH(r1, 1) @ r1<- BBBB 7701 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7702 EXPORT_PC() @ must export for invoke 7703 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7704 GET_VREG(r3, r10) @ r3<- "this" 7705 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7706 cmp r3, #0 @ null "this" ref? 7707 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7708 beq common_errNullObject @ "this" is null, throw exception 7709 bl common_invokeMethodRange @ continue on 7710 7711 7712 7713/* ------------------------------ */ 7714 .balign 64 7715.L_OP_UNUSED_FC: /* 0xfc */ 7716/* File: armv5te/OP_UNUSED_FC.S */ 7717/* File: armv5te/unused.S */ 7718 bl common_abort 7719 7720 7721 7722/* ------------------------------ */ 7723 .balign 64 7724.L_OP_UNUSED_FD: /* 0xfd */ 7725/* File: armv5te/OP_UNUSED_FD.S */ 7726/* File: armv5te/unused.S */ 7727 bl common_abort 7728 7729 7730 7731/* ------------------------------ */ 7732 .balign 64 7733.L_OP_UNUSED_FE: /* 0xfe */ 7734/* File: armv5te/OP_UNUSED_FE.S */ 7735/* File: armv5te/unused.S */ 7736 bl common_abort 7737 7738 7739 7740/* ------------------------------ */ 7741 .balign 64 7742.L_OP_UNUSED_FF: /* 0xff */ 7743/* File: armv5te/OP_UNUSED_FF.S */ 7744/* File: armv5te/unused.S */ 7745 bl common_abort 7746 7747 7748 7749 7750 .balign 64 7751 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 7752 .global dvmAsmInstructionEnd 7753dvmAsmInstructionEnd: 7754 7755/* 7756 * =========================================================================== 7757 * Sister implementations 7758 * =========================================================================== 7759 */ 7760 .global dvmAsmSisterStart 7761 .type dvmAsmSisterStart, %function 7762 .text 7763 .balign 4 7764dvmAsmSisterStart: 7765 7766/* continuation for OP_CONST_STRING */ 7767 7768 /* 7769 * Continuation if the String has not yet been resolved. 7770 * r1: BBBB (String ref) 7771 * r9: target register 7772 */ 7773.LOP_CONST_STRING_resolve: 7774 EXPORT_PC() 7775 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7776 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7777 bl dvmResolveString @ r0<- String reference 7778 cmp r0, #0 @ failed? 7779 beq common_exceptionThrown @ yup, handle the exception 7780 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7781 GET_INST_OPCODE(ip) @ extract opcode from rINST 7782 SET_VREG(r0, r9) @ vAA<- r0 7783 GOTO_OPCODE(ip) @ jump to next instruction 7784 7785 7786/* continuation for OP_CONST_STRING_JUMBO */ 7787 7788 /* 7789 * Continuation if the String has not yet been resolved. 7790 * r1: BBBBBBBB (String ref) 7791 * r9: target register 7792 */ 7793.LOP_CONST_STRING_JUMBO_resolve: 7794 EXPORT_PC() 7795 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7796 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7797 bl dvmResolveString @ r0<- String reference 7798 cmp r0, #0 @ failed? 7799 beq common_exceptionThrown @ yup, handle the exception 7800 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7801 GET_INST_OPCODE(ip) @ extract opcode from rINST 7802 SET_VREG(r0, r9) @ vAA<- r0 7803 GOTO_OPCODE(ip) @ jump to next instruction 7804 7805 7806/* continuation for OP_CONST_CLASS */ 7807 7808 /* 7809 * Continuation if the Class has not yet been resolved. 7810 * r1: BBBB (Class ref) 7811 * r9: target register 7812 */ 7813.LOP_CONST_CLASS_resolve: 7814 EXPORT_PC() 7815 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7816 mov r2, #1 @ r2<- true 7817 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7818 bl dvmResolveClass @ r0<- Class reference 7819 cmp r0, #0 @ failed? 7820 beq common_exceptionThrown @ yup, handle the exception 7821 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7822 GET_INST_OPCODE(ip) @ extract opcode from rINST 7823 SET_VREG(r0, r9) @ vAA<- r0 7824 GOTO_OPCODE(ip) @ jump to next instruction 7825 7826 7827/* continuation for OP_CHECK_CAST */ 7828 7829 /* 7830 * Trivial test failed, need to perform full check. This is common. 7831 * r0 holds obj->clazz 7832 * r1 holds class resolved from BBBB 7833 * r9 holds object 7834 */ 7835.LOP_CHECK_CAST_fullcheck: 7836 bl dvmInstanceofNonTrivial @ r0<- boolean result 7837 cmp r0, #0 @ failed? 7838 bne .LOP_CHECK_CAST_okay @ no, success 7839 7840 @ A cast has failed. We need to throw a ClassCastException with the 7841 @ class of the object that failed to be cast. 7842 EXPORT_PC() @ about to throw 7843 ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz 7844 ldr r0, .LstrClassCastExceptionPtr 7845 ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor 7846 bl dvmThrowExceptionWithClassMessage 7847 b common_exceptionThrown 7848 7849 /* 7850 * Resolution required. This is the least-likely path. 7851 * 7852 * r2 holds BBBB 7853 * r9 holds object 7854 */ 7855.LOP_CHECK_CAST_resolve: 7856 EXPORT_PC() @ resolve() could throw 7857 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7858 mov r1, r2 @ r1<- BBBB 7859 mov r2, #0 @ r2<- false 7860 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7861 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7862 cmp r0, #0 @ got null? 7863 beq common_exceptionThrown @ yes, handle exception 7864 mov r1, r0 @ r1<- class resolved from BBB 7865 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 7866 b .LOP_CHECK_CAST_resolved @ pick up where we left off 7867 7868.LstrClassCastExceptionPtr: 7869 .word .LstrClassCastException 7870 7871 7872/* continuation for OP_INSTANCE_OF */ 7873 7874 /* 7875 * Trivial test failed, need to perform full check. This is common. 7876 * r0 holds obj->clazz 7877 * r1 holds class resolved from BBBB 7878 * r9 holds A 7879 */ 7880.LOP_INSTANCE_OF_fullcheck: 7881 bl dvmInstanceofNonTrivial @ r0<- boolean result 7882 @ fall through to OP_INSTANCE_OF_store 7883 7884 /* 7885 * r0 holds boolean result 7886 * r9 holds A 7887 */ 7888.LOP_INSTANCE_OF_store: 7889 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7890 SET_VREG(r0, r9) @ vA<- r0 7891 GET_INST_OPCODE(ip) @ extract opcode from rINST 7892 GOTO_OPCODE(ip) @ jump to next instruction 7893 7894 /* 7895 * Trivial test succeeded, save and bail. 7896 * r9 holds A 7897 */ 7898.LOP_INSTANCE_OF_trivial: 7899 mov r0, #1 @ indicate success 7900 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper 7901 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7902 SET_VREG(r0, r9) @ vA<- r0 7903 GET_INST_OPCODE(ip) @ extract opcode from rINST 7904 GOTO_OPCODE(ip) @ jump to next instruction 7905 7906 /* 7907 * Resolution required. This is the least-likely path. 7908 * 7909 * r3 holds BBBB 7910 * r9 holds A 7911 */ 7912.LOP_INSTANCE_OF_resolve: 7913 EXPORT_PC() @ resolve() could throw 7914 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7915 mov r1, r3 @ r1<- BBBB 7916 mov r2, #1 @ r2<- true 7917 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7918 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7919 cmp r0, #0 @ got null? 7920 beq common_exceptionThrown @ yes, handle exception 7921 mov r1, r0 @ r1<- class resolved from BBB 7922 mov r3, rINST, lsr #12 @ r3<- B 7923 GET_VREG(r0, r3) @ r0<- vB (object) 7924 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 7925 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 7926 7927 7928/* continuation for OP_NEW_INSTANCE */ 7929 7930 .balign 32 @ minimize cache lines 7931.LOP_NEW_INSTANCE_finish: @ r0=new object 7932 mov r3, rINST, lsr #8 @ r3<- AA 7933 cmp r0, #0 @ failed? 7934 beq common_exceptionThrown @ yes, handle the exception 7935 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7936 GET_INST_OPCODE(ip) @ extract opcode from rINST 7937 SET_VREG(r0, r3) @ vAA<- r0 7938 GOTO_OPCODE(ip) @ jump to next instruction 7939 7940 /* 7941 * Class initialization required. 7942 * 7943 * r0 holds class object 7944 */ 7945.LOP_NEW_INSTANCE_needinit: 7946 mov r9, r0 @ save r0 7947 bl dvmInitClass @ initialize class 7948 cmp r0, #0 @ check boolean result 7949 mov r0, r9 @ restore r0 7950 bne .LOP_NEW_INSTANCE_initialized @ success, continue 7951 b common_exceptionThrown @ failed, deal with init exception 7952 7953 /* 7954 * Resolution required. This is the least-likely path. 7955 * 7956 * r1 holds BBBB 7957 */ 7958.LOP_NEW_INSTANCE_resolve: 7959 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7960 mov r2, #0 @ r2<- false 7961 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7962 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7963 cmp r0, #0 @ got null? 7964 bne .LOP_NEW_INSTANCE_resolved @ no, continue 7965 b common_exceptionThrown @ yes, handle exception 7966 7967.LstrInstantiationErrorPtr: 7968 .word .LstrInstantiationError 7969 7970 7971/* continuation for OP_NEW_ARRAY */ 7972 7973 7974 /* 7975 * Resolve class. (This is an uncommon case.) 7976 * 7977 * r1 holds array length 7978 * r2 holds class ref CCCC 7979 */ 7980.LOP_NEW_ARRAY_resolve: 7981 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7982 mov r9, r1 @ r9<- length (save) 7983 mov r1, r2 @ r1<- CCCC 7984 mov r2, #0 @ r2<- false 7985 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7986 bl dvmResolveClass @ r0<- call(clazz, ref) 7987 cmp r0, #0 @ got null? 7988 mov r1, r9 @ r1<- length (restore) 7989 beq common_exceptionThrown @ yes, handle exception 7990 @ fall through to OP_NEW_ARRAY_finish 7991 7992 /* 7993 * Finish allocation. 7994 * 7995 * r0 holds class 7996 * r1 holds array length 7997 */ 7998.LOP_NEW_ARRAY_finish: 7999 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 8000 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 8001 cmp r0, #0 @ failed? 8002 mov r2, rINST, lsr #8 @ r2<- A+ 8003 beq common_exceptionThrown @ yes, handle the exception 8004 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8005 and r2, r2, #15 @ r2<- A 8006 GET_INST_OPCODE(ip) @ extract opcode from rINST 8007 SET_VREG(r0, r2) @ vA<- r0 8008 GOTO_OPCODE(ip) @ jump to next instruction 8009 8010 8011/* continuation for OP_FILLED_NEW_ARRAY */ 8012 8013 /* 8014 * On entry: 8015 * r0 holds array class 8016 * r10 holds AA or BA 8017 */ 8018.LOP_FILLED_NEW_ARRAY_continue: 8019 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8020 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8021 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8022 .if 0 8023 mov r1, r10 @ r1<- AA (length) 8024 .else 8025 mov r1, r10, lsr #4 @ r1<- B (length) 8026 .endif 8027 cmp r3, #'I' @ array of ints? 8028 cmpne r3, #'L' @ array of objects? 8029 cmpne r3, #'[' @ array of arrays? 8030 mov r9, r1 @ save length in r9 8031 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 8032 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8033 cmp r0, #0 @ null return? 8034 beq common_exceptionThrown @ alloc failed, handle exception 8035 8036 FETCH(r1, 2) @ r1<- FEDC or CCCC 8037 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8038 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8039 subs r9, r9, #1 @ length--, check for neg 8040 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8041 bmi 2f @ was zero, bail 8042 8043 @ copy values from registers into the array 8044 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8045 .if 0 8046 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 80471: ldr r3, [r2], #4 @ r3<- *r2++ 8048 subs r9, r9, #1 @ count-- 8049 str r3, [r0], #4 @ *contents++ = vX 8050 bpl 1b 8051 @ continue at 2 8052 .else 8053 cmp r9, #4 @ length was initially 5? 8054 and r2, r10, #15 @ r2<- A 8055 bne 1f @ <= 4 args, branch 8056 GET_VREG(r3, r2) @ r3<- vA 8057 sub r9, r9, #1 @ count-- 8058 str r3, [r0, #16] @ contents[4] = vA 80591: and r2, r1, #15 @ r2<- F/E/D/C 8060 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8061 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8062 subs r9, r9, #1 @ count-- 8063 str r3, [r0], #4 @ *contents++ = vX 8064 bpl 1b 8065 @ continue at 2 8066 .endif 8067 80682: 8069 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8070 GOTO_OPCODE(ip) @ execute it 8071 8072 /* 8073 * Throw an exception indicating that we have not implemented this 8074 * mode of filled-new-array. 8075 */ 8076.LOP_FILLED_NEW_ARRAY_notimpl: 8077 ldr r0, .L_strInternalError 8078 ldr r1, .L_strFilledNewArrayNotImpl 8079 bl dvmThrowException 8080 b common_exceptionThrown 8081 8082 .if (!0) @ define in one or the other, not both 8083.L_strFilledNewArrayNotImpl: 8084 .word .LstrFilledNewArrayNotImpl 8085.L_strInternalError: 8086 .word .LstrInternalError 8087 .endif 8088 8089 8090/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8091 8092 /* 8093 * On entry: 8094 * r0 holds array class 8095 * r10 holds AA or BA 8096 */ 8097.LOP_FILLED_NEW_ARRAY_RANGE_continue: 8098 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8099 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8100 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8101 .if 1 8102 mov r1, r10 @ r1<- AA (length) 8103 .else 8104 mov r1, r10, lsr #4 @ r1<- B (length) 8105 .endif 8106 cmp r3, #'I' @ array of ints? 8107 cmpne r3, #'L' @ array of objects? 8108 cmpne r3, #'[' @ array of arrays? 8109 mov r9, r1 @ save length in r9 8110 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8111 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8112 cmp r0, #0 @ null return? 8113 beq common_exceptionThrown @ alloc failed, handle exception 8114 8115 FETCH(r1, 2) @ r1<- FEDC or CCCC 8116 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8117 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8118 subs r9, r9, #1 @ length--, check for neg 8119 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8120 bmi 2f @ was zero, bail 8121 8122 @ copy values from registers into the array 8123 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8124 .if 1 8125 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 81261: ldr r3, [r2], #4 @ r3<- *r2++ 8127 subs r9, r9, #1 @ count-- 8128 str r3, [r0], #4 @ *contents++ = vX 8129 bpl 1b 8130 @ continue at 2 8131 .else 8132 cmp r9, #4 @ length was initially 5? 8133 and r2, r10, #15 @ r2<- A 8134 bne 1f @ <= 4 args, branch 8135 GET_VREG(r3, r2) @ r3<- vA 8136 sub r9, r9, #1 @ count-- 8137 str r3, [r0, #16] @ contents[4] = vA 81381: and r2, r1, #15 @ r2<- F/E/D/C 8139 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8140 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8141 subs r9, r9, #1 @ count-- 8142 str r3, [r0], #4 @ *contents++ = vX 8143 bpl 1b 8144 @ continue at 2 8145 .endif 8146 81472: 8148 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8149 GOTO_OPCODE(ip) @ execute it 8150 8151 /* 8152 * Throw an exception indicating that we have not implemented this 8153 * mode of filled-new-array. 8154 */ 8155.LOP_FILLED_NEW_ARRAY_RANGE_notimpl: 8156 ldr r0, .L_strInternalError 8157 ldr r1, .L_strFilledNewArrayNotImpl 8158 bl dvmThrowException 8159 b common_exceptionThrown 8160 8161 .if (!1) @ define in one or the other, not both 8162.L_strFilledNewArrayNotImpl: 8163 .word .LstrFilledNewArrayNotImpl 8164.L_strInternalError: 8165 .word .LstrInternalError 8166 .endif 8167 8168 8169/* continuation for OP_CMPL_FLOAT */ 8170.LOP_CMPL_FLOAT_finish: 8171 SET_VREG(r0, r9) @ vAA<- r0 8172 GOTO_OPCODE(ip) @ jump to next instruction 8173 8174 8175/* continuation for OP_CMPG_FLOAT */ 8176.LOP_CMPG_FLOAT_finish: 8177 SET_VREG(r0, r9) @ vAA<- r0 8178 GOTO_OPCODE(ip) @ jump to next instruction 8179 8180 8181/* continuation for OP_CMPL_DOUBLE */ 8182.LOP_CMPL_DOUBLE_finish: 8183 SET_VREG(r0, r9) @ vAA<- r0 8184 GOTO_OPCODE(ip) @ jump to next instruction 8185 8186 8187/* continuation for OP_CMPG_DOUBLE */ 8188.LOP_CMPG_DOUBLE_finish: 8189 SET_VREG(r0, r9) @ vAA<- r0 8190 GOTO_OPCODE(ip) @ jump to next instruction 8191 8192 8193/* continuation for OP_CMP_LONG */ 8194 8195.LOP_CMP_LONG_less: 8196 mvn r1, #0 @ r1<- -1 8197 @ Want to cond code the next mov so we can avoid branch, but don't see it; 8198 @ instead, we just replicate the tail end. 8199 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8200 SET_VREG(r1, r9) @ vAA<- r1 8201 GET_INST_OPCODE(ip) @ extract opcode from rINST 8202 GOTO_OPCODE(ip) @ jump to next instruction 8203 8204.LOP_CMP_LONG_greater: 8205 mov r1, #1 @ r1<- 1 8206 @ fall through to _finish 8207 8208.LOP_CMP_LONG_finish: 8209 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8210 SET_VREG(r1, r9) @ vAA<- r1 8211 GET_INST_OPCODE(ip) @ extract opcode from rINST 8212 GOTO_OPCODE(ip) @ jump to next instruction 8213 8214 8215/* continuation for OP_AGET_WIDE */ 8216 8217.LOP_AGET_WIDE_finish: 8218 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8219 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8220 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8221 GET_INST_OPCODE(ip) @ extract opcode from rINST 8222 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8223 GOTO_OPCODE(ip) @ jump to next instruction 8224 8225 8226/* continuation for OP_APUT_WIDE */ 8227 8228.LOP_APUT_WIDE_finish: 8229 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8230 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8231 GET_INST_OPCODE(ip) @ extract opcode from rINST 8232 strd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8233 GOTO_OPCODE(ip) @ jump to next instruction 8234 8235 8236/* continuation for OP_APUT_OBJECT */ 8237 /* 8238 * On entry: 8239 * r1 = vBB (arrayObj) 8240 * r9 = vAA (obj) 8241 * r10 = offset into array (vBB + vCC * width) 8242 */ 8243.LOP_APUT_OBJECT_finish: 8244 cmp r9, #0 @ storing null reference? 8245 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8246 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8247 ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz 8248 bl dvmCanPutArrayElement @ test object type vs. array type 8249 cmp r0, #0 @ okay? 8250 beq common_errArrayStore @ no 8251.LOP_APUT_OBJECT_skip_check: 8252 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8253 GET_INST_OPCODE(ip) @ extract opcode from rINST 8254 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8255 GOTO_OPCODE(ip) @ jump to next instruction 8256 8257 8258/* continuation for OP_IGET */ 8259 8260 /* 8261 * Currently: 8262 * r0 holds resolved field 8263 * r9 holds object 8264 */ 8265.LOP_IGET_finish: 8266 @bl common_squeak0 8267 cmp r9, #0 @ check object for null 8268 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8269 beq common_errNullObject @ object was null 8270 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8271 mov r2, rINST, lsr #8 @ r2<- A+ 8272 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8273 and r2, r2, #15 @ r2<- A 8274 GET_INST_OPCODE(ip) @ extract opcode from rINST 8275 SET_VREG(r0, r2) @ fp[A]<- r0 8276 GOTO_OPCODE(ip) @ jump to next instruction 8277 8278 8279/* continuation for OP_IGET_WIDE */ 8280 8281 /* 8282 * Currently: 8283 * r0 holds resolved field 8284 * r9 holds object 8285 */ 8286.LOP_IGET_WIDE_finish: 8287 cmp r9, #0 @ check object for null 8288 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8289 beq common_errNullObject @ object was null 8290 .if 0 8291 add r0, r9, r3 @ r0<- address of field 8292 bl android_quasiatomic_read_64 @ r0/r1<- contents of field 8293 .else 8294 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 8295 .endif 8296 mov r2, rINST, lsr #8 @ r2<- A+ 8297 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8298 and r2, r2, #15 @ r2<- A 8299 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8300 GET_INST_OPCODE(ip) @ extract opcode from rINST 8301 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8302 GOTO_OPCODE(ip) @ jump to next instruction 8303 8304 8305/* continuation for OP_IGET_OBJECT */ 8306 8307 /* 8308 * Currently: 8309 * r0 holds resolved field 8310 * r9 holds object 8311 */ 8312.LOP_IGET_OBJECT_finish: 8313 @bl common_squeak0 8314 cmp r9, #0 @ check object for null 8315 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8316 beq common_errNullObject @ object was null 8317 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8318 mov r2, rINST, lsr #8 @ r2<- A+ 8319 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8320 and r2, r2, #15 @ r2<- A 8321 GET_INST_OPCODE(ip) @ extract opcode from rINST 8322 SET_VREG(r0, r2) @ fp[A]<- r0 8323 GOTO_OPCODE(ip) @ jump to next instruction 8324 8325 8326/* continuation for OP_IGET_BOOLEAN */ 8327 8328 /* 8329 * Currently: 8330 * r0 holds resolved field 8331 * r9 holds object 8332 */ 8333.LOP_IGET_BOOLEAN_finish: 8334 @bl common_squeak1 8335 cmp r9, #0 @ check object for null 8336 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8337 beq common_errNullObject @ object was null 8338 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8339 mov r2, rINST, lsr #8 @ r2<- A+ 8340 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8341 and r2, r2, #15 @ r2<- A 8342 GET_INST_OPCODE(ip) @ extract opcode from rINST 8343 SET_VREG(r0, r2) @ fp[A]<- r0 8344 GOTO_OPCODE(ip) @ jump to next instruction 8345 8346 8347/* continuation for OP_IGET_BYTE */ 8348 8349 /* 8350 * Currently: 8351 * r0 holds resolved field 8352 * r9 holds object 8353 */ 8354.LOP_IGET_BYTE_finish: 8355 @bl common_squeak2 8356 cmp r9, #0 @ check object for null 8357 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8358 beq common_errNullObject @ object was null 8359 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8360 mov r2, rINST, lsr #8 @ r2<- A+ 8361 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8362 and r2, r2, #15 @ r2<- A 8363 GET_INST_OPCODE(ip) @ extract opcode from rINST 8364 SET_VREG(r0, r2) @ fp[A]<- r0 8365 GOTO_OPCODE(ip) @ jump to next instruction 8366 8367 8368/* continuation for OP_IGET_CHAR */ 8369 8370 /* 8371 * Currently: 8372 * r0 holds resolved field 8373 * r9 holds object 8374 */ 8375.LOP_IGET_CHAR_finish: 8376 @bl common_squeak3 8377 cmp r9, #0 @ check object for null 8378 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8379 beq common_errNullObject @ object was null 8380 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8381 mov r2, rINST, lsr #8 @ r2<- A+ 8382 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8383 and r2, r2, #15 @ r2<- A 8384 GET_INST_OPCODE(ip) @ extract opcode from rINST 8385 SET_VREG(r0, r2) @ fp[A]<- r0 8386 GOTO_OPCODE(ip) @ jump to next instruction 8387 8388 8389/* continuation for OP_IGET_SHORT */ 8390 8391 /* 8392 * Currently: 8393 * r0 holds resolved field 8394 * r9 holds object 8395 */ 8396.LOP_IGET_SHORT_finish: 8397 @bl common_squeak4 8398 cmp r9, #0 @ check object for null 8399 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8400 beq common_errNullObject @ object was null 8401 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8402 mov r2, rINST, lsr #8 @ r2<- A+ 8403 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8404 and r2, r2, #15 @ r2<- A 8405 GET_INST_OPCODE(ip) @ extract opcode from rINST 8406 SET_VREG(r0, r2) @ fp[A]<- r0 8407 GOTO_OPCODE(ip) @ jump to next instruction 8408 8409 8410/* continuation for OP_IPUT */ 8411 8412 /* 8413 * Currently: 8414 * r0 holds resolved field 8415 * r9 holds object 8416 */ 8417.LOP_IPUT_finish: 8418 @bl common_squeak0 8419 mov r1, rINST, lsr #8 @ r1<- A+ 8420 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8421 and r1, r1, #15 @ r1<- A 8422 cmp r9, #0 @ check object for null 8423 GET_VREG(r0, r1) @ r0<- fp[A] 8424 beq common_errNullObject @ object was null 8425 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8426 GET_INST_OPCODE(ip) @ extract opcode from rINST 8427 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8428 GOTO_OPCODE(ip) @ jump to next instruction 8429 8430 8431/* continuation for OP_IPUT_WIDE */ 8432 8433 /* 8434 * Currently: 8435 * r0 holds resolved field 8436 * r9 holds object 8437 */ 8438.LOP_IPUT_WIDE_finish: 8439 mov r2, rINST, lsr #8 @ r2<- A+ 8440 cmp r9, #0 @ check object for null 8441 and r2, r2, #15 @ r2<- A 8442 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8443 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8444 beq common_errNullObject @ object was null 8445 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8446 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8447 GET_INST_OPCODE(r10) @ extract opcode from rINST 8448 .if 0 8449 add r2, r9, r3 @ r2<- target address 8450 bl android_quasiatomic_swap_64 @ stores r0/r1 into addr r2 8451 .else 8452 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1 8453 .endif 8454 GOTO_OPCODE(r10) @ jump to next instruction 8455 8456 8457/* continuation for OP_IPUT_OBJECT */ 8458 8459 /* 8460 * Currently: 8461 * r0 holds resolved field 8462 * r9 holds object 8463 */ 8464.LOP_IPUT_OBJECT_finish: 8465 @bl common_squeak0 8466 mov r1, rINST, lsr #8 @ r1<- A+ 8467 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8468 and r1, r1, #15 @ r1<- A 8469 cmp r9, #0 @ check object for null 8470 GET_VREG(r0, r1) @ r0<- fp[A] 8471 beq common_errNullObject @ object was null 8472 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8473 GET_INST_OPCODE(ip) @ extract opcode from rINST 8474 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8475 GOTO_OPCODE(ip) @ jump to next instruction 8476 8477 8478/* continuation for OP_IPUT_BOOLEAN */ 8479 8480 /* 8481 * Currently: 8482 * r0 holds resolved field 8483 * r9 holds object 8484 */ 8485.LOP_IPUT_BOOLEAN_finish: 8486 @bl common_squeak1 8487 mov r1, rINST, lsr #8 @ r1<- A+ 8488 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8489 and r1, r1, #15 @ r1<- A 8490 cmp r9, #0 @ check object for null 8491 GET_VREG(r0, r1) @ r0<- fp[A] 8492 beq common_errNullObject @ object was null 8493 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8494 GET_INST_OPCODE(ip) @ extract opcode from rINST 8495 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8496 GOTO_OPCODE(ip) @ jump to next instruction 8497 8498 8499/* continuation for OP_IPUT_BYTE */ 8500 8501 /* 8502 * Currently: 8503 * r0 holds resolved field 8504 * r9 holds object 8505 */ 8506.LOP_IPUT_BYTE_finish: 8507 @bl common_squeak2 8508 mov r1, rINST, lsr #8 @ r1<- A+ 8509 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8510 and r1, r1, #15 @ r1<- A 8511 cmp r9, #0 @ check object for null 8512 GET_VREG(r0, r1) @ r0<- fp[A] 8513 beq common_errNullObject @ object was null 8514 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8515 GET_INST_OPCODE(ip) @ extract opcode from rINST 8516 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8517 GOTO_OPCODE(ip) @ jump to next instruction 8518 8519 8520/* continuation for OP_IPUT_CHAR */ 8521 8522 /* 8523 * Currently: 8524 * r0 holds resolved field 8525 * r9 holds object 8526 */ 8527.LOP_IPUT_CHAR_finish: 8528 @bl common_squeak3 8529 mov r1, rINST, lsr #8 @ r1<- A+ 8530 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8531 and r1, r1, #15 @ r1<- A 8532 cmp r9, #0 @ check object for null 8533 GET_VREG(r0, r1) @ r0<- fp[A] 8534 beq common_errNullObject @ object was null 8535 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8536 GET_INST_OPCODE(ip) @ extract opcode from rINST 8537 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8538 GOTO_OPCODE(ip) @ jump to next instruction 8539 8540 8541/* continuation for OP_IPUT_SHORT */ 8542 8543 /* 8544 * Currently: 8545 * r0 holds resolved field 8546 * r9 holds object 8547 */ 8548.LOP_IPUT_SHORT_finish: 8549 @bl common_squeak4 8550 mov r1, rINST, lsr #8 @ r1<- A+ 8551 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8552 and r1, r1, #15 @ r1<- A 8553 cmp r9, #0 @ check object for null 8554 GET_VREG(r0, r1) @ r0<- fp[A] 8555 beq common_errNullObject @ object was null 8556 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8557 GET_INST_OPCODE(ip) @ extract opcode from rINST 8558 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8559 GOTO_OPCODE(ip) @ jump to next instruction 8560 8561 8562/* continuation for OP_SGET */ 8563 8564 /* 8565 * Continuation if the field has not yet been resolved. 8566 * r1: BBBB field ref 8567 */ 8568.LOP_SGET_resolve: 8569 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8570 EXPORT_PC() @ resolve() could throw, so export now 8571 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8572 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8573 cmp r0, #0 @ success? 8574 bne .LOP_SGET_finish @ yes, finish 8575 b common_exceptionThrown @ no, handle exception 8576 8577 8578/* continuation for OP_SGET_WIDE */ 8579 8580 /* 8581 * Continuation if the field has not yet been resolved. 8582 * r1: BBBB field ref 8583 * 8584 * Returns StaticField pointer in r0. 8585 */ 8586.LOP_SGET_WIDE_resolve: 8587 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8588 EXPORT_PC() @ resolve() could throw, so export now 8589 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8590 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8591 cmp r0, #0 @ success? 8592 bne .LOP_SGET_WIDE_finish @ yes, finish 8593 b common_exceptionThrown @ no, handle exception 8594 8595 8596/* continuation for OP_SGET_OBJECT */ 8597 8598 /* 8599 * Continuation if the field has not yet been resolved. 8600 * r1: BBBB field ref 8601 */ 8602.LOP_SGET_OBJECT_resolve: 8603 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8604 EXPORT_PC() @ resolve() could throw, so export now 8605 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8606 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8607 cmp r0, #0 @ success? 8608 bne .LOP_SGET_OBJECT_finish @ yes, finish 8609 b common_exceptionThrown @ no, handle exception 8610 8611 8612/* continuation for OP_SGET_BOOLEAN */ 8613 8614 /* 8615 * Continuation if the field has not yet been resolved. 8616 * r1: BBBB field ref 8617 */ 8618.LOP_SGET_BOOLEAN_resolve: 8619 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8620 EXPORT_PC() @ resolve() could throw, so export now 8621 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8622 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8623 cmp r0, #0 @ success? 8624 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 8625 b common_exceptionThrown @ no, handle exception 8626 8627 8628/* continuation for OP_SGET_BYTE */ 8629 8630 /* 8631 * Continuation if the field has not yet been resolved. 8632 * r1: BBBB field ref 8633 */ 8634.LOP_SGET_BYTE_resolve: 8635 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8636 EXPORT_PC() @ resolve() could throw, so export now 8637 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8638 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8639 cmp r0, #0 @ success? 8640 bne .LOP_SGET_BYTE_finish @ yes, finish 8641 b common_exceptionThrown @ no, handle exception 8642 8643 8644/* continuation for OP_SGET_CHAR */ 8645 8646 /* 8647 * Continuation if the field has not yet been resolved. 8648 * r1: BBBB field ref 8649 */ 8650.LOP_SGET_CHAR_resolve: 8651 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8652 EXPORT_PC() @ resolve() could throw, so export now 8653 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8654 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8655 cmp r0, #0 @ success? 8656 bne .LOP_SGET_CHAR_finish @ yes, finish 8657 b common_exceptionThrown @ no, handle exception 8658 8659 8660/* continuation for OP_SGET_SHORT */ 8661 8662 /* 8663 * Continuation if the field has not yet been resolved. 8664 * r1: BBBB field ref 8665 */ 8666.LOP_SGET_SHORT_resolve: 8667 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8668 EXPORT_PC() @ resolve() could throw, so export now 8669 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8670 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8671 cmp r0, #0 @ success? 8672 bne .LOP_SGET_SHORT_finish @ yes, finish 8673 b common_exceptionThrown @ no, handle exception 8674 8675 8676/* continuation for OP_SPUT */ 8677 8678 /* 8679 * Continuation if the field has not yet been resolved. 8680 * r1: BBBB field ref 8681 */ 8682.LOP_SPUT_resolve: 8683 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8684 EXPORT_PC() @ resolve() could throw, so export now 8685 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8686 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8687 cmp r0, #0 @ success? 8688 bne .LOP_SPUT_finish @ yes, finish 8689 b common_exceptionThrown @ no, handle exception 8690 8691 8692/* continuation for OP_SPUT_WIDE */ 8693 8694 /* 8695 * Continuation if the field has not yet been resolved. 8696 * r1: BBBB field ref 8697 * r9: &fp[AA] 8698 * 8699 * Returns StaticField pointer in r2. 8700 */ 8701.LOP_SPUT_WIDE_resolve: 8702 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8703 EXPORT_PC() @ resolve() could throw, so export now 8704 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8705 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8706 cmp r0, #0 @ success? 8707 mov r2, r0 @ copy to r2 8708 bne .LOP_SPUT_WIDE_finish @ yes, finish 8709 b common_exceptionThrown @ no, handle exception 8710 8711 8712/* continuation for OP_SPUT_OBJECT */ 8713 8714 /* 8715 * Continuation if the field has not yet been resolved. 8716 * r1: BBBB field ref 8717 */ 8718.LOP_SPUT_OBJECT_resolve: 8719 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8720 EXPORT_PC() @ resolve() could throw, so export now 8721 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8722 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8723 cmp r0, #0 @ success? 8724 bne .LOP_SPUT_OBJECT_finish @ yes, finish 8725 b common_exceptionThrown @ no, handle exception 8726 8727 8728/* continuation for OP_SPUT_BOOLEAN */ 8729 8730 /* 8731 * Continuation if the field has not yet been resolved. 8732 * r1: BBBB field ref 8733 */ 8734.LOP_SPUT_BOOLEAN_resolve: 8735 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8736 EXPORT_PC() @ resolve() could throw, so export now 8737 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8738 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8739 cmp r0, #0 @ success? 8740 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 8741 b common_exceptionThrown @ no, handle exception 8742 8743 8744/* continuation for OP_SPUT_BYTE */ 8745 8746 /* 8747 * Continuation if the field has not yet been resolved. 8748 * r1: BBBB field ref 8749 */ 8750.LOP_SPUT_BYTE_resolve: 8751 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8752 EXPORT_PC() @ resolve() could throw, so export now 8753 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8754 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8755 cmp r0, #0 @ success? 8756 bne .LOP_SPUT_BYTE_finish @ yes, finish 8757 b common_exceptionThrown @ no, handle exception 8758 8759 8760/* continuation for OP_SPUT_CHAR */ 8761 8762 /* 8763 * Continuation if the field has not yet been resolved. 8764 * r1: BBBB field ref 8765 */ 8766.LOP_SPUT_CHAR_resolve: 8767 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8768 EXPORT_PC() @ resolve() could throw, so export now 8769 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8770 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8771 cmp r0, #0 @ success? 8772 bne .LOP_SPUT_CHAR_finish @ yes, finish 8773 b common_exceptionThrown @ no, handle exception 8774 8775 8776/* continuation for OP_SPUT_SHORT */ 8777 8778 /* 8779 * Continuation if the field has not yet been resolved. 8780 * r1: BBBB field ref 8781 */ 8782.LOP_SPUT_SHORT_resolve: 8783 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8784 EXPORT_PC() @ resolve() could throw, so export now 8785 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8786 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8787 cmp r0, #0 @ success? 8788 bne .LOP_SPUT_SHORT_finish @ yes, finish 8789 b common_exceptionThrown @ no, handle exception 8790 8791 8792/* continuation for OP_INVOKE_VIRTUAL */ 8793 8794 /* 8795 * At this point: 8796 * r0 = resolved base method 8797 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 8798 */ 8799.LOP_INVOKE_VIRTUAL_continue: 8800 GET_VREG(r1, r10) @ r1<- "this" ptr 8801 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8802 cmp r1, #0 @ is "this" null? 8803 beq common_errNullObject @ null "this", throw exception 8804 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 8805 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 8806 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 8807 bl common_invokeMethodNoRange @ continue on 8808 8809 8810/* continuation for OP_INVOKE_SUPER */ 8811 8812 /* 8813 * At this point: 8814 * r0 = resolved base method 8815 * r9 = method->clazz 8816 */ 8817.LOP_INVOKE_SUPER_continue: 8818 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 8819 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8820 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 8821 EXPORT_PC() @ must export for invoke 8822 cmp r2, r3 @ compare (methodIndex, vtableCount) 8823 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass 8824 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 8825 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 8826 bl common_invokeMethodNoRange @ continue on 8827 8828.LOP_INVOKE_SUPER_resolve: 8829 mov r0, r9 @ r0<- method->clazz 8830 mov r2, #METHOD_VIRTUAL @ resolver method type 8831 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8832 cmp r0, #0 @ got null? 8833 bne .LOP_INVOKE_SUPER_continue @ no, continue 8834 b common_exceptionThrown @ yes, handle exception 8835 8836 /* 8837 * Throw a NoSuchMethodError with the method name as the message. 8838 * r0 = resolved base method 8839 */ 8840.LOP_INVOKE_SUPER_nsm: 8841 ldr r1, [r0, #offMethod_name] @ r1<- method name 8842 b common_errNoSuchMethod 8843 8844 8845/* continuation for OP_INVOKE_DIRECT */ 8846 8847 /* 8848 * On entry: 8849 * r1 = reference (BBBB or CCCC) 8850 * r10 = "this" register 8851 */ 8852.LOP_INVOKE_DIRECT_resolve: 8853 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8854 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8855 mov r2, #METHOD_DIRECT @ resolver method type 8856 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8857 cmp r0, #0 @ got null? 8858 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 8859 bne .LOP_INVOKE_DIRECT_finish @ no, continue 8860 b common_exceptionThrown @ yes, handle exception 8861 8862 8863/* continuation for OP_INVOKE_VIRTUAL_RANGE */ 8864 8865 /* 8866 * At this point: 8867 * r0 = resolved base method 8868 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 8869 */ 8870.LOP_INVOKE_VIRTUAL_RANGE_continue: 8871 GET_VREG(r1, r10) @ r1<- "this" ptr 8872 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8873 cmp r1, #0 @ is "this" null? 8874 beq common_errNullObject @ null "this", throw exception 8875 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 8876 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 8877 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 8878 bl common_invokeMethodRange @ continue on 8879 8880 8881/* continuation for OP_INVOKE_SUPER_RANGE */ 8882 8883 /* 8884 * At this point: 8885 * r0 = resolved base method 8886 * r9 = method->clazz 8887 */ 8888.LOP_INVOKE_SUPER_RANGE_continue: 8889 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 8890 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8891 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 8892 EXPORT_PC() @ must export for invoke 8893 cmp r2, r3 @ compare (methodIndex, vtableCount) 8894 bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass 8895 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 8896 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 8897 bl common_invokeMethodRange @ continue on 8898 8899.LOP_INVOKE_SUPER_RANGE_resolve: 8900 mov r0, r9 @ r0<- method->clazz 8901 mov r2, #METHOD_VIRTUAL @ resolver method type 8902 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8903 cmp r0, #0 @ got null? 8904 bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue 8905 b common_exceptionThrown @ yes, handle exception 8906 8907 /* 8908 * Throw a NoSuchMethodError with the method name as the message. 8909 * r0 = resolved base method 8910 */ 8911.LOP_INVOKE_SUPER_RANGE_nsm: 8912 ldr r1, [r0, #offMethod_name] @ r1<- method name 8913 b common_errNoSuchMethod 8914 8915 8916/* continuation for OP_INVOKE_DIRECT_RANGE */ 8917 8918 /* 8919 * On entry: 8920 * r1 = reference (BBBB or CCCC) 8921 * r10 = "this" register 8922 */ 8923.LOP_INVOKE_DIRECT_RANGE_resolve: 8924 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8925 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8926 mov r2, #METHOD_DIRECT @ resolver method type 8927 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8928 cmp r0, #0 @ got null? 8929 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 8930 bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue 8931 b common_exceptionThrown @ yes, handle exception 8932 8933 8934/* continuation for OP_FLOAT_TO_LONG */ 8935/* 8936 * Convert the float in r0 to a long in r0/r1. 8937 * 8938 * We have to clip values to long min/max per the specification. The 8939 * expected common case is a "reasonable" value that converts directly 8940 * to modest integer. The EABI convert function isn't doing this for us. 8941 */ 8942f2l_doconv: 8943 stmfd sp!, {r4, lr} 8944 mov r1, #0x5f000000 @ (float)maxlong 8945 mov r4, r0 8946 bl __aeabi_fcmpge @ is arg >= maxlong? 8947 cmp r0, #0 @ nonzero == yes 8948 mvnne r0, #0 @ return maxlong (7fffffff) 8949 mvnne r1, #0x80000000 8950 ldmnefd sp!, {r4, pc} 8951 8952 mov r0, r4 @ recover arg 8953 mov r1, #0xdf000000 @ (float)minlong 8954 bl __aeabi_fcmple @ is arg <= minlong? 8955 cmp r0, #0 @ nonzero == yes 8956 movne r0, #0 @ return minlong (80000000) 8957 movne r1, #0x80000000 8958 ldmnefd sp!, {r4, pc} 8959 8960 mov r0, r4 @ recover arg 8961 mov r1, r4 8962 bl __aeabi_fcmpeq @ is arg == self? 8963 cmp r0, #0 @ zero == no 8964 moveq r1, #0 @ return zero for NaN 8965 ldmeqfd sp!, {r4, pc} 8966 8967 mov r0, r4 @ recover arg 8968 bl __aeabi_f2lz @ convert float to long 8969 ldmfd sp!, {r4, pc} 8970 8971 8972/* continuation for OP_DOUBLE_TO_LONG */ 8973/* 8974 * Convert the double in r0/r1 to a long in r0/r1. 8975 * 8976 * We have to clip values to long min/max per the specification. The 8977 * expected common case is a "reasonable" value that converts directly 8978 * to modest integer. The EABI convert function isn't doing this for us. 8979 */ 8980d2l_doconv: 8981 stmfd sp!, {r4, r5, lr} @ save regs 8982 mov r3, #0x43000000 @ maxlong, as a double (high word) 8983 add r3, #0x00e00000 @ 0x43e00000 8984 mov r2, #0 @ maxlong, as a double (low word) 8985 sub sp, sp, #4 @ align for EABI 8986 mov r4, r0 @ save a copy of r0 8987 mov r5, r1 @ and r1 8988 bl __aeabi_dcmpge @ is arg >= maxlong? 8989 cmp r0, #0 @ nonzero == yes 8990 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 8991 mvnne r1, #0x80000000 8992 bne 1f 8993 8994 mov r0, r4 @ recover arg 8995 mov r1, r5 8996 mov r3, #0xc3000000 @ minlong, as a double (high word) 8997 add r3, #0x00e00000 @ 0xc3e00000 8998 mov r2, #0 @ minlong, as a double (low word) 8999 bl __aeabi_dcmple @ is arg <= minlong? 9000 cmp r0, #0 @ nonzero == yes 9001 movne r0, #0 @ return minlong (8000000000000000) 9002 movne r1, #0x80000000 9003 bne 1f 9004 9005 mov r0, r4 @ recover arg 9006 mov r1, r5 9007 mov r2, r4 @ compare against self 9008 mov r3, r5 9009 bl __aeabi_dcmpeq @ is arg == self? 9010 cmp r0, #0 @ zero == no 9011 moveq r1, #0 @ return zero for NaN 9012 beq 1f 9013 9014 mov r0, r4 @ recover arg 9015 mov r1, r5 9016 bl __aeabi_d2lz @ convert double to long 9017 90181: 9019 add sp, sp, #4 9020 ldmfd sp!, {r4, r5, pc} 9021 9022 9023/* continuation for OP_MUL_LONG */ 9024 9025.LOP_MUL_LONG_finish: 9026 GET_INST_OPCODE(ip) @ extract opcode from rINST 9027 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 9028 GOTO_OPCODE(ip) @ jump to next instruction 9029 9030 9031/* continuation for OP_SHL_LONG */ 9032 9033.LOP_SHL_LONG_finish: 9034 mov r0, r0, asl r2 @ r0<- r0 << r2 9035 GET_INST_OPCODE(ip) @ extract opcode from rINST 9036 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9037 GOTO_OPCODE(ip) @ jump to next instruction 9038 9039 9040/* continuation for OP_SHR_LONG */ 9041 9042.LOP_SHR_LONG_finish: 9043 mov r1, r1, asr r2 @ r1<- r1 >> r2 9044 GET_INST_OPCODE(ip) @ extract opcode from rINST 9045 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9046 GOTO_OPCODE(ip) @ jump to next instruction 9047 9048 9049/* continuation for OP_USHR_LONG */ 9050 9051.LOP_USHR_LONG_finish: 9052 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 9053 GET_INST_OPCODE(ip) @ extract opcode from rINST 9054 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9055 GOTO_OPCODE(ip) @ jump to next instruction 9056 9057 9058/* continuation for OP_SHL_LONG_2ADDR */ 9059 9060.LOP_SHL_LONG_2ADDR_finish: 9061 GET_INST_OPCODE(ip) @ extract opcode from rINST 9062 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9063 GOTO_OPCODE(ip) @ jump to next instruction 9064 9065 9066/* continuation for OP_SHR_LONG_2ADDR */ 9067 9068.LOP_SHR_LONG_2ADDR_finish: 9069 GET_INST_OPCODE(ip) @ extract opcode from rINST 9070 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9071 GOTO_OPCODE(ip) @ jump to next instruction 9072 9073 9074/* continuation for OP_USHR_LONG_2ADDR */ 9075 9076.LOP_USHR_LONG_2ADDR_finish: 9077 GET_INST_OPCODE(ip) @ extract opcode from rINST 9078 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9079 GOTO_OPCODE(ip) @ jump to next instruction 9080 9081 9082/* continuation for OP_IGET_WIDE_VOLATILE */ 9083 9084 /* 9085 * Currently: 9086 * r0 holds resolved field 9087 * r9 holds object 9088 */ 9089.LOP_IGET_WIDE_VOLATILE_finish: 9090 cmp r9, #0 @ check object for null 9091 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9092 beq common_errNullObject @ object was null 9093 .if 1 9094 add r0, r9, r3 @ r0<- address of field 9095 bl android_quasiatomic_read_64 @ r0/r1<- contents of field 9096 .else 9097 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 9098 .endif 9099 mov r2, rINST, lsr #8 @ r2<- A+ 9100 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9101 and r2, r2, #15 @ r2<- A 9102 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 9103 GET_INST_OPCODE(ip) @ extract opcode from rINST 9104 stmia r3, {r0-r1} @ fp[A]<- r0/r1 9105 GOTO_OPCODE(ip) @ jump to next instruction 9106 9107 9108/* continuation for OP_IPUT_WIDE_VOLATILE */ 9109 9110 /* 9111 * Currently: 9112 * r0 holds resolved field 9113 * r9 holds object 9114 */ 9115.LOP_IPUT_WIDE_VOLATILE_finish: 9116 mov r2, rINST, lsr #8 @ r2<- A+ 9117 cmp r9, #0 @ check object for null 9118 and r2, r2, #15 @ r2<- A 9119 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9120 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 9121 beq common_errNullObject @ object was null 9122 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9123 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 9124 GET_INST_OPCODE(r10) @ extract opcode from rINST 9125 .if 1 9126 add r2, r9, r3 @ r2<- target address 9127 bl android_quasiatomic_swap_64 @ stores r0/r1 into addr r2 9128 .else 9129 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1 9130 .endif 9131 GOTO_OPCODE(r10) @ jump to next instruction 9132 9133 9134/* continuation for OP_SGET_WIDE_VOLATILE */ 9135 9136 /* 9137 * Continuation if the field has not yet been resolved. 9138 * r1: BBBB field ref 9139 * 9140 * Returns StaticField pointer in r0. 9141 */ 9142.LOP_SGET_WIDE_VOLATILE_resolve: 9143 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9144 EXPORT_PC() @ resolve() could throw, so export now 9145 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9146 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9147 cmp r0, #0 @ success? 9148 bne .LOP_SGET_WIDE_VOLATILE_finish @ yes, finish 9149 b common_exceptionThrown @ no, handle exception 9150 9151 9152/* continuation for OP_SPUT_WIDE_VOLATILE */ 9153 9154 /* 9155 * Continuation if the field has not yet been resolved. 9156 * r1: BBBB field ref 9157 * r9: &fp[AA] 9158 * 9159 * Returns StaticField pointer in r2. 9160 */ 9161.LOP_SPUT_WIDE_VOLATILE_resolve: 9162 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9163 EXPORT_PC() @ resolve() could throw, so export now 9164 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9165 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9166 cmp r0, #0 @ success? 9167 mov r2, r0 @ copy to r2 9168 bne .LOP_SPUT_WIDE_VOLATILE_finish @ yes, finish 9169 b common_exceptionThrown @ no, handle exception 9170 9171 9172/* continuation for OP_EXECUTE_INLINE */ 9173 9174 /* 9175 * Extract args, call function. 9176 * r0 = #of args (0-4) 9177 * r10 = call index 9178 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9179 * 9180 * Other ideas: 9181 * - Use a jump table from the main piece to jump directly into the 9182 * AND/LDR pairs. Costs a data load, saves a branch. 9183 * - Have five separate pieces that do the loading, so we can work the 9184 * interleave a little better. Increases code size. 9185 */ 9186.LOP_EXECUTE_INLINE_continue: 9187 rsb r0, r0, #4 @ r0<- 4-r0 9188 FETCH(r9, 2) @ r9<- FEDC 9189 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9190 bl common_abort @ (skipped due to ARM prefetch) 91914: and ip, r9, #0xf000 @ isolate F 9192 ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) 91933: and ip, r9, #0x0f00 @ isolate E 9194 ldr r2, [rFP, ip, lsr #6] @ r2<- vE 91952: and ip, r9, #0x00f0 @ isolate D 9196 ldr r1, [rFP, ip, lsr #2] @ r1<- vD 91971: and ip, r9, #0x000f @ isolate C 9198 ldr r0, [rFP, ip, lsl #2] @ r0<- vC 91990: 9200 ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation 9201 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9202 @ (not reached) 9203 9204.LOP_EXECUTE_INLINE_table: 9205 .word gDvmInlineOpsTable 9206 9207 9208/* continuation for OP_EXECUTE_INLINE_RANGE */ 9209 9210 /* 9211 * Extract args, call function. 9212 * r0 = #of args (0-4) 9213 * r10 = call index 9214 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9215 */ 9216.LOP_EXECUTE_INLINE_RANGE_continue: 9217 rsb r0, r0, #4 @ r0<- 4-r0 9218 FETCH(r9, 2) @ r9<- CCCC 9219 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9220 bl common_abort @ (skipped due to ARM prefetch) 92214: add ip, r9, #3 @ base+3 9222 GET_VREG(r3, ip) @ r3<- vBase[3] 92233: add ip, r9, #2 @ base+2 9224 GET_VREG(r2, ip) @ r2<- vBase[2] 92252: add ip, r9, #1 @ base+1 9226 GET_VREG(r1, ip) @ r1<- vBase[1] 92271: add ip, r9, #0 @ (nop) 9228 GET_VREG(r0, ip) @ r0<- vBase[0] 92290: 9230 ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation 9231 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9232 @ (not reached) 9233 9234.LOP_EXECUTE_INLINE_RANGE_table: 9235 .word gDvmInlineOpsTable 9236 9237 9238 .size dvmAsmSisterStart, .-dvmAsmSisterStart 9239 .global dvmAsmSisterEnd 9240dvmAsmSisterEnd: 9241 9242/* File: armv5te/footer.S */ 9243 9244/* 9245 * =========================================================================== 9246 * Common subroutines and data 9247 * =========================================================================== 9248 */ 9249 9250 9251 9252 .text 9253 .align 2 9254 9255#if defined(WITH_JIT) 9256#if defined(WITH_SELF_VERIFICATION) 9257 .global dvmJitToInterpPunt 9258dvmJitToInterpPunt: 9259 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9260 mov r2,#kSVSPunt @ r2<- interpreter entry point 9261 mov r3, #0 9262 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9263 b jitSVShadowRunEnd @ doesn't return 9264 9265 .global dvmJitToInterpSingleStep 9266dvmJitToInterpSingleStep: 9267 str lr,[rGLUE,#offGlue_jitResumeNPC] 9268 str r1,[rGLUE,#offGlue_jitResumeDPC] 9269 mov r2,#kSVSSingleStep @ r2<- interpreter entry point 9270 b jitSVShadowRunEnd @ doesn't return 9271 9272 .global dvmJitToInterpTraceSelectNoChain 9273dvmJitToInterpTraceSelectNoChain: 9274 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9275 mov r0,rPC @ pass our target PC 9276 mov r2,#kSVSTraceSelectNoChain @ r2<- interpreter entry point 9277 mov r3, #0 9278 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9279 b jitSVShadowRunEnd @ doesn't return 9280 9281 .global dvmJitToInterpTraceSelect 9282dvmJitToInterpTraceSelect: 9283 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9284 ldr r0,[lr, #-1] @ pass our target PC 9285 mov r2,#kSVSTraceSelect @ r2<- interpreter entry point 9286 mov r3, #0 9287 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9288 b jitSVShadowRunEnd @ doesn't return 9289 9290 .global dvmJitToInterpBackwardBranch 9291dvmJitToInterpBackwardBranch: 9292 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9293 ldr r0,[lr, #-1] @ pass our target PC 9294 mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point 9295 mov r3, #0 9296 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9297 b jitSVShadowRunEnd @ doesn't return 9298 9299 .global dvmJitToInterpNormal 9300dvmJitToInterpNormal: 9301 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9302 ldr r0,[lr, #-1] @ pass our target PC 9303 mov r2,#kSVSNormal @ r2<- interpreter entry point 9304 mov r3, #0 9305 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9306 b jitSVShadowRunEnd @ doesn't return 9307 9308 .global dvmJitToInterpNoChain 9309dvmJitToInterpNoChain: 9310 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9311 mov r0,rPC @ pass our target PC 9312 mov r2,#kSVSNoChain @ r2<- interpreter entry point 9313 mov r3, #0 9314 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9315 b jitSVShadowRunEnd @ doesn't return 9316#else 9317/* 9318 * Return from the translation cache to the interpreter when the compiler is 9319 * having issues translating/executing a Dalvik instruction. We have to skip 9320 * the code cache lookup otherwise it is possible to indefinitely bouce 9321 * between the interpreter and the code cache if the instruction that fails 9322 * to be compiled happens to be at a trace start. 9323 */ 9324 .global dvmJitToInterpPunt 9325dvmJitToInterpPunt: 9326 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9327 mov rPC, r0 9328#if defined(WITH_JIT_TUNING) 9329 mov r0,lr 9330 bl dvmBumpPunt; 9331#endif 9332 EXPORT_PC() 9333 mov r0, #0 9334 str r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9335 adrl rIBASE, dvmAsmInstructionStart 9336 FETCH_INST() 9337 GET_INST_OPCODE(ip) 9338 GOTO_OPCODE(ip) 9339 9340/* 9341 * Return to the interpreter to handle a single instruction. 9342 * On entry: 9343 * r0 <= PC 9344 * r1 <= PC of resume instruction 9345 * lr <= resume point in translation 9346 */ 9347 .global dvmJitToInterpSingleStep 9348dvmJitToInterpSingleStep: 9349 str lr,[rGLUE,#offGlue_jitResumeNPC] 9350 str r1,[rGLUE,#offGlue_jitResumeDPC] 9351 mov r1,#kInterpEntryInstr 9352 @ enum is 4 byte in aapcs-EABI 9353 str r1, [rGLUE, #offGlue_entryPoint] 9354 mov rPC,r0 9355 EXPORT_PC() 9356 9357 adrl rIBASE, dvmAsmInstructionStart 9358 mov r2,#kJitSingleStep @ Ask for single step and then revert 9359 str r2,[rGLUE,#offGlue_jitState] 9360 mov r1,#1 @ set changeInterp to bail to debug interp 9361 b common_gotoBail 9362 9363/* 9364 * Return from the translation cache and immediately request 9365 * a translation for the exit target. Commonly used for callees. 9366 */ 9367 .global dvmJitToInterpTraceSelectNoChain 9368dvmJitToInterpTraceSelectNoChain: 9369#if defined(WITH_JIT_TUNING) 9370 bl dvmBumpNoChain 9371#endif 9372 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9373 mov r0,rPC 9374 bl dvmJitGetCodeAddr @ Is there a translation? 9375 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9376 mov r1, rPC @ arg1 of translation may need this 9377 mov lr, #0 @ in case target is HANDLER_INTERPRET 9378 cmp r0,#0 9379 bxne r0 @ continue native execution if so 9380 b 2f 9381 9382/* 9383 * Return from the translation cache and immediately request 9384 * a translation for the exit target. Commonly used following 9385 * invokes. 9386 */ 9387 .global dvmJitToInterpTraceSelect 9388dvmJitToInterpTraceSelect: 9389 ldr rPC,[lr, #-1] @ get our target PC 9390 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9391 add rINST,lr,#-5 @ save start of chain branch 9392 mov r0,rPC 9393 bl dvmJitGetCodeAddr @ Is there a translation? 9394 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9395 cmp r0,#0 9396 beq 2f 9397 mov r1,rINST 9398 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9399 mov r1, rPC @ arg1 of translation may need this 9400 mov lr, #0 @ in case target is HANDLER_INTERPRET 9401 cmp r0,#0 @ successful chain? 9402 bxne r0 @ continue native execution 9403 b toInterpreter @ didn't chain - resume with interpreter 9404 9405/* No translation, so request one if profiling isn't disabled*/ 94062: 9407 adrl rIBASE, dvmAsmInstructionStart 9408 GET_JIT_PROF_TABLE(r0) 9409 FETCH_INST() 9410 cmp r0, #0 9411 movne r2,#kJitTSelectRequestHot @ ask for trace selection 9412 bne common_selectTrace 9413 GET_INST_OPCODE(ip) 9414 GOTO_OPCODE(ip) 9415 9416/* 9417 * Return from the translation cache to the interpreter. 9418 * The return was done with a BLX from thumb mode, and 9419 * the following 32-bit word contains the target rPC value. 9420 * Note that lr (r14) will have its low-order bit set to denote 9421 * its thumb-mode origin. 9422 * 9423 * We'll need to stash our lr origin away, recover the new 9424 * target and then check to see if there is a translation available 9425 * for our new target. If so, we do a translation chain and 9426 * go back to native execution. Otherwise, it's back to the 9427 * interpreter (after treating this entry as a potential 9428 * trace start). 9429 */ 9430 .global dvmJitToInterpNormal 9431dvmJitToInterpNormal: 9432 ldr rPC,[lr, #-1] @ get our target PC 9433 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9434 add rINST,lr,#-5 @ save start of chain branch 9435#if defined(WITH_JIT_TUNING) 9436 bl dvmBumpNormal 9437#endif 9438 mov r0,rPC 9439 bl dvmJitGetCodeAddr @ Is there a translation? 9440 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9441 cmp r0,#0 9442 beq toInterpreter @ go if not, otherwise do chain 9443 mov r1,rINST 9444 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9445 mov r1, rPC @ arg1 of translation may need this 9446 mov lr, #0 @ in case target is HANDLER_INTERPRET 9447 cmp r0,#0 @ successful chain? 9448 bxne r0 @ continue native execution 9449 b toInterpreter @ didn't chain - resume with interpreter 9450 9451/* 9452 * Return from the translation cache to the interpreter to do method invocation. 9453 * Check if translation exists for the callee, but don't chain to it. 9454 */ 9455 .global dvmJitToInterpNoChain 9456dvmJitToInterpNoChain: 9457#if defined(WITH_JIT_TUNING) 9458 bl dvmBumpNoChain 9459#endif 9460 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9461 mov r0,rPC 9462 bl dvmJitGetCodeAddr @ Is there a translation? 9463 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9464 mov r1, rPC @ arg1 of translation may need this 9465 mov lr, #0 @ in case target is HANDLER_INTERPRET 9466 cmp r0,#0 9467 bxne r0 @ continue native execution if so 9468#endif 9469 9470/* 9471 * No translation, restore interpreter regs and start interpreting. 9472 * rGLUE & rFP were preserved in the translated code, and rPC has 9473 * already been restored by the time we get here. We'll need to set 9474 * up rIBASE & rINST, and load the address of the JitTable into r0. 9475 */ 9476toInterpreter: 9477 EXPORT_PC() 9478 adrl rIBASE, dvmAsmInstructionStart 9479 FETCH_INST() 9480 GET_JIT_PROF_TABLE(r0) 9481 @ NOTE: intended fallthrough 9482/* 9483 * Common code to update potential trace start counter, and initiate 9484 * a trace-build if appropriate. On entry, rPC should point to the 9485 * next instruction to execute, and rINST should be already loaded with 9486 * the next opcode word, and r0 holds a pointer to the jit profile 9487 * table (pJitProfTable). 9488 */ 9489common_testUpdateProfile: 9490 cmp r0,#0 9491 GET_INST_OPCODE(ip) 9492 GOTO_OPCODE_IFEQ(ip) @ if not profiling, fallthrough otherwise */ 9493 9494common_updateProfile: 9495 eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function 9496 lsl r3,r3,#(32 - JIT_PROF_SIZE_LOG_2) @ shift out excess bits 9497 ldrb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter 9498 GET_INST_OPCODE(ip) 9499 subs r1,r1,#1 @ decrement counter 9500 strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it 9501 GOTO_OPCODE_IFNE(ip) @ if not threshold, fallthrough otherwise */ 9502 9503/* 9504 * Here, we switch to the debug interpreter to request 9505 * trace selection. First, though, check to see if there 9506 * is already a native translation in place (and, if so, 9507 * jump to it now). 9508 */ 9509 GET_JIT_THRESHOLD(r1) 9510 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9511 strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter 9512 EXPORT_PC() 9513 mov r0,rPC 9514 bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC) 9515 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9516 mov r1, rPC @ arg1 of translation may need this 9517 mov lr, #0 @ in case target is HANDLER_INTERPRET 9518 cmp r0,#0 9519#if !defined(WITH_SELF_VERIFICATION) 9520 bxne r0 @ jump to the translation 9521 mov r2,#kJitTSelectRequest @ ask for trace selection 9522 @ fall-through to common_selectTrace 9523#else 9524 moveq r2,#kJitTSelectRequest @ ask for trace selection 9525 beq common_selectTrace 9526 /* 9527 * At this point, we have a target translation. However, if 9528 * that translation is actually the interpret-only pseudo-translation 9529 * we want to treat it the same as no translation. 9530 */ 9531 mov r10, r0 @ save target 9532 bl dvmCompilerGetInterpretTemplate 9533 cmp r0, r10 @ special case? 9534 bne jitSVShadowRunStart @ set up self verification shadow space 9535 GET_INST_OPCODE(ip) 9536 GOTO_OPCODE(ip) 9537 /* no return */ 9538#endif 9539 9540/* 9541 * On entry: 9542 * r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot 9543 */ 9544common_selectTrace: 9545 str r2,[rGLUE,#offGlue_jitState] 9546 mov r2,#kInterpEntryInstr @ normal entry reason 9547 str r2,[rGLUE,#offGlue_entryPoint] 9548 mov r1,#1 @ set changeInterp 9549 b common_gotoBail 9550 9551#if defined(WITH_SELF_VERIFICATION) 9552/* 9553 * Save PC and registers to shadow memory for self verification mode 9554 * before jumping to native translation. 9555 * On entry: 9556 * rPC, rFP, rGLUE: the values that they should contain 9557 * r10: the address of the target translation. 9558 */ 9559jitSVShadowRunStart: 9560 mov r0,rPC @ r0<- program counter 9561 mov r1,rFP @ r1<- frame pointer 9562 mov r2,rGLUE @ r2<- InterpState pointer 9563 mov r3,r10 @ r3<- target translation 9564 bl dvmSelfVerificationSaveState @ save registers to shadow space 9565 ldr rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space 9566 add rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space 9567 bx r10 @ jump to the translation 9568 9569/* 9570 * Restore PC, registers, and interpState to original values 9571 * before jumping back to the interpreter. 9572 */ 9573jitSVShadowRunEnd: 9574 mov r1,rFP @ pass ending fp 9575 bl dvmSelfVerificationRestoreState @ restore pc and fp values 9576 ldr rPC,[r0,#offShadowSpace_startPC] @ restore PC 9577 ldr rFP,[r0,#offShadowSpace_fp] @ restore FP 9578 ldr rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState 9579 ldr r1,[r0,#offShadowSpace_svState] @ get self verification state 9580 cmp r1,#0 @ check for punt condition 9581 beq 1f 9582 mov r2,#kJitSelfVerification @ ask for self verification 9583 str r2,[rGLUE,#offGlue_jitState] 9584 mov r2,#kInterpEntryInstr @ normal entry reason 9585 str r2,[rGLUE,#offGlue_entryPoint] 9586 mov r1,#1 @ set changeInterp 9587 b common_gotoBail 9588 95891: @ exit to interpreter without check 9590 EXPORT_PC() 9591 adrl rIBASE, dvmAsmInstructionStart 9592 FETCH_INST() 9593 GET_INST_OPCODE(ip) 9594 GOTO_OPCODE(ip) 9595#endif 9596 9597#endif 9598 9599/* 9600 * Common code when a backward branch is taken. 9601 * 9602 * TODO: we could avoid a branch by just setting r0 and falling through 9603 * into the common_periodicChecks code, and having a test on r0 at the 9604 * end determine if we should return to the caller or update & branch to 9605 * the next instr. 9606 * 9607 * On entry: 9608 * r9 is PC adjustment *in bytes* 9609 */ 9610common_backwardBranch: 9611 mov r0, #kInterpEntryInstr 9612 bl common_periodicChecks 9613#if defined(WITH_JIT) 9614 GET_JIT_PROF_TABLE(r0) 9615 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9616 cmp r0,#0 9617 bne common_updateProfile 9618 GET_INST_OPCODE(ip) 9619 GOTO_OPCODE(ip) 9620#else 9621 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9622 GET_INST_OPCODE(ip) @ extract opcode from rINST 9623 GOTO_OPCODE(ip) @ jump to next instruction 9624#endif 9625 9626 9627/* 9628 * Need to see if the thread needs to be suspended or debugger/profiler 9629 * activity has begun. If so, we suspend the thread or side-exit to 9630 * the debug interpreter as appropriate. 9631 * 9632 * The common case is no activity on any of these, so we want to figure 9633 * that out quickly. If something is up, we can then sort out what. 9634 * 9635 * We want to be fast if the VM was built without debugger or profiler 9636 * support, but we also need to recognize that the system is usually 9637 * shipped with both of these enabled. 9638 * 9639 * TODO: reduce this so we're just checking a single location. 9640 * 9641 * On entry: 9642 * r0 is reentry type, e.g. kInterpEntryInstr (for debugger/profiling) 9643 * r9 is trampoline PC adjustment *in bytes* 9644 */ 9645common_periodicChecks: 9646 ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount 9647 9648#if defined(WITH_DEBUGGER) 9649 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 9650#endif 9651#if defined(WITH_PROFILER) 9652 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 9653#endif 9654 9655 ldr ip, [r3] @ ip<- suspendCount (int) 9656 9657#if defined(WITH_DEBUGGER) && defined(WITH_PROFILER) 9658 cmp r1, #0 @ debugger enabled? 9659 ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean) 9660 ldr r2, [r2] @ r2<- activeProfilers (int) 9661 orrne ip, ip, r1 @ ip<- suspendCount | debuggerActive 9662 orrs ip, ip, r2 @ ip<- suspend|debugger|profiler; set Z 9663#elif defined(WITH_DEBUGGER) 9664 cmp r1, #0 @ debugger enabled? 9665 ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean) 9666 orrsne ip, ip, r1 @ yes, ip<- suspend | debugger; set Z 9667 @ (if not enabled, Z was set by test for r1==0, which is what we want) 9668#elif defined (WITH_PROFILER) 9669 ldr r2, [r2] @ r2<- activeProfilers (int) 9670 orrs ip, ip, r2 @ ip<- suspendCount | activeProfilers 9671#else 9672 cmp ip, #0 @ not ORing anything in; set Z 9673#endif 9674 9675 bxeq lr @ all zero, return 9676 9677 /* 9678 * One or more interesting events have happened. Figure out what. 9679 * 9680 * If debugging or profiling are compiled in, we need to disambiguate. 9681 * 9682 * r0 still holds the reentry type. 9683 */ 9684#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER) 9685 ldr ip, [r3] @ ip<- suspendCount (int) 9686 cmp ip, #0 @ want suspend? 9687 beq 1f @ no, must be debugger/profiler 9688#endif 9689 9690 stmfd sp!, {r0, lr} @ preserve r0 and lr 9691#if defined(WITH_JIT) 9692 /* 9693 * Refresh the Jit's cached copy of profile table pointer. This pointer 9694 * doubles as the Jit's on/off switch. 9695 */ 9696 ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable 9697 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 9698 ldr r3, [r3] @ r3 <- pJitProfTable 9699 EXPORT_PC() @ need for precise GC 9700 str r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch 9701#else 9702 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 9703 EXPORT_PC() @ need for precise GC 9704#endif 9705 bl dvmCheckSuspendPending @ do full check, suspend if necessary 9706 ldmfd sp!, {r0, lr} @ restore r0 and lr 9707 9708#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER) 9709 9710 /* 9711 * Reload the debugger/profiler enable flags. We're checking to see 9712 * if either of these got set while we were suspended. 9713 * 9714 * We can't really avoid the #ifdefs here, because the fields don't 9715 * exist when the feature is disabled. 9716 */ 9717#if defined(WITH_DEBUGGER) 9718 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 9719 cmp r1, #0 @ debugger enabled? 9720 ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean) 9721#else 9722 mov r1, #0 9723#endif 9724#if defined(WITH_PROFILER) 9725 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 9726 ldr r2, [r2] @ r2<- activeProfilers (int) 9727#else 9728 mov r2, #0 9729#endif 9730 9731 orrs r1, r1, r2 9732 beq 2f 9733 97341: @ debugger/profiler enabled, bail out; glue->entryPoint was set above 9735 str r0, [rGLUE, #offGlue_entryPoint] @ store r0, need for debug/prof 9736 add rPC, rPC, r9 @ update rPC 9737 mov r1, #1 @ "want switch" = true 9738 b common_gotoBail @ side exit 9739 9740#endif /*WITH_DEBUGGER || WITH_PROFILER*/ 9741 97422: 9743 bx lr @ nothing to do, return 9744 9745 9746/* 9747 * The equivalent of "goto bail", this calls through the "bail handler". 9748 * 9749 * State registers will be saved to the "glue" area before bailing. 9750 * 9751 * On entry: 9752 * r1 is "bool changeInterp", indicating if we want to switch to the 9753 * other interpreter or just bail all the way out 9754 */ 9755common_gotoBail: 9756 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 9757 mov r0, rGLUE @ r0<- glue ptr 9758 b dvmMterpStdBail @ call(glue, changeInterp) 9759 9760 @add r1, r1, #1 @ using (boolean+1) 9761 @add r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf 9762 @bl _longjmp @ does not return 9763 @bl common_abort 9764 9765 9766/* 9767 * Common code for method invocation with range. 9768 * 9769 * On entry: 9770 * r0 is "Method* methodToCall", the method we're trying to call 9771 */ 9772common_invokeMethodRange: 9773.LinvokeNewRange: 9774 @ prepare to copy args to "outs" area of current frame 9775 movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero 9776 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9777 beq .LinvokeArgsDone @ if no args, skip the rest 9778 FETCH(r1, 2) @ r1<- CCCC 9779 9780 @ r0=methodToCall, r1=CCCC, r2=count, r10=outs 9781 @ (very few methods have > 10 args; could unroll for common cases) 9782 add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC] 9783 sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args 9784 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 97851: ldr r1, [r3], #4 @ val = *fp++ 9786 subs r2, r2, #1 @ count-- 9787 str r1, [r10], #4 @ *outs++ = val 9788 bne 1b @ ...while count != 0 9789 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9790 b .LinvokeArgsDone 9791 9792/* 9793 * Common code for method invocation without range. 9794 * 9795 * On entry: 9796 * r0 is "Method* methodToCall", the method we're trying to call 9797 */ 9798common_invokeMethodNoRange: 9799.LinvokeNewNoRange: 9800 @ prepare to copy args to "outs" area of current frame 9801 movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero 9802 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9803 FETCH(r1, 2) @ r1<- GFED (load here to hide latency) 9804 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 9805 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9806 beq .LinvokeArgsDone 9807 9808 @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs 9809.LinvokeNonRange: 9810 rsb r2, r2, #5 @ r2<- 5-r2 9811 add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each 9812 bl common_abort @ (skipped due to ARM prefetch) 98135: and ip, rINST, #0x0f00 @ isolate A 9814 ldr r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2) 9815 mov r0, r0 @ nop 9816 str r2, [r10, #-4]! @ *--outs = vA 98174: and ip, r1, #0xf000 @ isolate G 9818 ldr r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2) 9819 mov r0, r0 @ nop 9820 str r2, [r10, #-4]! @ *--outs = vG 98213: and ip, r1, #0x0f00 @ isolate F 9822 ldr r2, [rFP, ip, lsr #6] @ r2<- vF 9823 mov r0, r0 @ nop 9824 str r2, [r10, #-4]! @ *--outs = vF 98252: and ip, r1, #0x00f0 @ isolate E 9826 ldr r2, [rFP, ip, lsr #2] @ r2<- vE 9827 mov r0, r0 @ nop 9828 str r2, [r10, #-4]! @ *--outs = vE 98291: and ip, r1, #0x000f @ isolate D 9830 ldr r2, [rFP, ip, lsl #2] @ r2<- vD 9831 mov r0, r0 @ nop 9832 str r2, [r10, #-4]! @ *--outs = vD 98330: @ fall through to .LinvokeArgsDone 9834 9835.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize 9836 ldr r2, [r0, #offMethod_insns] @ r2<- method->insns 9837 ldr rINST, [r0, #offMethod_clazz] @ rINST<- method->clazz 9838 @ find space for the new stack frame, check for overflow 9839 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area 9840 sub r1, r1, r9, lsl #2 @ r1<- newFp (old savearea - regsSize) 9841 SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea 9842@ bl common_dumpRegs 9843 ldr r9, [rGLUE, #offGlue_interpStackEnd] @ r9<- interpStackEnd 9844 sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize) 9845 cmp r3, r9 @ bottom < interpStackEnd? 9846 ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags 9847 blo .LstackOverflow @ yes, this frame will overflow stack 9848 9849 @ set up newSaveArea 9850#ifdef EASY_GDB 9851 SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area 9852 str ip, [r10, #offStackSaveArea_prevSave] 9853#endif 9854 str rFP, [r10, #offStackSaveArea_prevFrame] 9855 str rPC, [r10, #offStackSaveArea_savedPc] 9856#if defined(WITH_JIT) 9857 mov r9, #0 9858 str r9, [r10, #offStackSaveArea_returnAddr] 9859#endif 9860 str r0, [r10, #offStackSaveArea_method] 9861 tst r3, #ACC_NATIVE 9862 bne .LinvokeNative 9863 9864 /* 9865 stmfd sp!, {r0-r3} 9866 bl common_printNewline 9867 mov r0, rFP 9868 mov r1, #0 9869 bl dvmDumpFp 9870 ldmfd sp!, {r0-r3} 9871 stmfd sp!, {r0-r3} 9872 mov r0, r1 9873 mov r1, r10 9874 bl dvmDumpFp 9875 bl common_printNewline 9876 ldmfd sp!, {r0-r3} 9877 */ 9878 9879 ldrh r9, [r2] @ r9 <- load INST from new PC 9880 ldr r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex 9881 mov rPC, r2 @ publish new rPC 9882 ldr r2, [rGLUE, #offGlue_self] @ r2<- glue->self 9883 9884 @ Update "glue" values for the new method 9885 @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST 9886 str r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall 9887 str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ... 9888#if defined(WITH_JIT) 9889 GET_JIT_PROF_TABLE(r0) 9890 mov rFP, r1 @ fp = newFp 9891 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 9892 mov rINST, r9 @ publish new rINST 9893 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 9894 cmp r0,#0 9895 bne common_updateProfile 9896 GOTO_OPCODE(ip) @ jump to next instruction 9897#else 9898 mov rFP, r1 @ fp = newFp 9899 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 9900 mov rINST, r9 @ publish new rINST 9901 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 9902 GOTO_OPCODE(ip) @ jump to next instruction 9903#endif 9904 9905.LinvokeNative: 9906 @ Prep for the native call 9907 @ r0=methodToCall, r1=newFp, r10=newSaveArea 9908 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 9909 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->... 9910 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp 9911 str r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top 9912 mov r9, r3 @ r9<- glue->self (preserve) 9913 9914 mov r2, r0 @ r2<- methodToCall 9915 mov r0, r1 @ r0<- newFp (points to args) 9916 add r1, rGLUE, #offGlue_retval @ r1<- &retval 9917 9918#ifdef ASSIST_DEBUGGER 9919 /* insert fake function header to help gdb find the stack frame */ 9920 b .Lskip 9921 .type dalvik_mterp, %function 9922dalvik_mterp: 9923 .fnstart 9924 MTERP_ENTRY1 9925 MTERP_ENTRY2 9926.Lskip: 9927#endif 9928 9929 @mov lr, pc @ set return addr 9930 @ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc 9931 LDR_PC_LR "[r2, #offMethod_nativeFunc]" 9932 9933#if defined(WITH_JIT) 9934 ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status 9935#endif 9936 9937 @ native return; r9=self, r10=newSaveArea 9938 @ equivalent to dvmPopJniLocals 9939 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top 9940 ldr r1, [r9, #offThread_exception] @ check for exception 9941#if defined(WITH_JIT) 9942 ldr r3, [r3] @ r3 <- gDvmJit.pProfTable 9943#endif 9944 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp 9945 cmp r1, #0 @ null? 9946 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top 9947#if defined(WITH_JIT) 9948 str r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch 9949#endif 9950 bne common_exceptionThrown @ no, handle exception 9951 9952 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 9953 GET_INST_OPCODE(ip) @ extract opcode from rINST 9954 GOTO_OPCODE(ip) @ jump to next instruction 9955 9956.LstackOverflow: @ r0=methodToCall 9957 mov r1, r0 @ r1<- methodToCall 9958 ldr r0, [rGLUE, #offGlue_self] @ r0<- self 9959 bl dvmHandleStackOverflow 9960 b common_exceptionThrown 9961#ifdef ASSIST_DEBUGGER 9962 .fnend 9963#endif 9964 9965 9966 /* 9967 * Common code for method invocation, calling through "glue code". 9968 * 9969 * TODO: now that we have range and non-range invoke handlers, this 9970 * needs to be split into two. Maybe just create entry points 9971 * that set r9 and jump here? 9972 * 9973 * On entry: 9974 * r0 is "Method* methodToCall", the method we're trying to call 9975 * r9 is "bool methodCallRange", indicating if this is a /range variant 9976 */ 9977 .if 0 9978.LinvokeOld: 9979 sub sp, sp, #8 @ space for args + pad 9980 FETCH(ip, 2) @ ip<- FEDC or CCCC 9981 mov r2, r0 @ A2<- methodToCall 9982 mov r0, rGLUE @ A0<- glue 9983 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 9984 mov r1, r9 @ A1<- methodCallRange 9985 mov r3, rINST, lsr #8 @ A3<- AA 9986 str ip, [sp, #0] @ A4<- ip 9987 bl dvmMterp_invokeMethod @ call the C invokeMethod 9988 add sp, sp, #8 @ remove arg area 9989 b common_resumeAfterGlueCall @ continue to next instruction 9990 .endif 9991 9992 9993 9994/* 9995 * Common code for handling a return instruction. 9996 * 9997 * This does not return. 9998 */ 9999common_returnFromMethod: 10000.LreturnNew: 10001 mov r0, #kInterpEntryReturn 10002 mov r9, #0 10003 bl common_periodicChecks 10004 10005 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) 10006 ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame 10007 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc 10008 ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)] 10009 @ r2<- method we're returning to 10010 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10011 cmp r2, #0 @ is this a break frame? 10012 ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz 10013 mov r1, #0 @ "want switch" = false 10014 beq common_gotoBail @ break frame, bail out completely 10015 10016 PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST 10017 str r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method 10018 ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex 10019 str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp 10020#if defined(WITH_JIT) 10021 ldr r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr 10022 GET_JIT_PROF_TABLE(r0) 10023 mov rPC, r9 @ publish new rPC 10024 str r1, [rGLUE, #offGlue_methodClassDex] 10025 str r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land 10026 cmp r10, #0 @ caller is compiled code 10027 blxne r10 10028 GET_INST_OPCODE(ip) @ extract opcode from rINST 10029 cmp r0,#0 10030 bne common_updateProfile 10031 GOTO_OPCODE(ip) @ jump to next instruction 10032#else 10033 GET_INST_OPCODE(ip) @ extract opcode from rINST 10034 mov rPC, r9 @ publish new rPC 10035 str r1, [rGLUE, #offGlue_methodClassDex] 10036 GOTO_OPCODE(ip) @ jump to next instruction 10037#endif 10038 10039 /* 10040 * Return handling, calls through "glue code". 10041 */ 10042 .if 0 10043.LreturnOld: 10044 SAVE_PC_FP_TO_GLUE() @ export state 10045 mov r0, rGLUE @ arg to function 10046 bl dvmMterp_returnFromMethod 10047 b common_resumeAfterGlueCall 10048 .endif 10049 10050 10051/* 10052 * Somebody has thrown an exception. Handle it. 10053 * 10054 * If the exception processing code returns to us (instead of falling 10055 * out of the interpreter), continue with whatever the next instruction 10056 * now happens to be. 10057 * 10058 * This does not return. 10059 */ 10060 .global dvmMterpCommonExceptionThrown 10061dvmMterpCommonExceptionThrown: 10062common_exceptionThrown: 10063.LexceptionNew: 10064 mov r0, #kInterpEntryThrow 10065 mov r9, #0 10066 bl common_periodicChecks 10067 10068 ldr r10, [rGLUE, #offGlue_self] @ r10<- glue->self 10069 ldr r9, [r10, #offThread_exception] @ r9<- self->exception 10070 mov r1, r10 @ r1<- self 10071 mov r0, r9 @ r0<- exception 10072 bl dvmAddTrackedAlloc @ don't let the exception be GCed 10073 mov r3, #0 @ r3<- NULL 10074 str r3, [r10, #offThread_exception] @ self->exception = NULL 10075 10076 /* set up args and a local for "&fp" */ 10077 /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */ 10078 str rFP, [sp, #-4]! @ *--sp = fp 10079 mov ip, sp @ ip<- &fp 10080 mov r3, #0 @ r3<- false 10081 str ip, [sp, #-4]! @ *--sp = &fp 10082 ldr r1, [rGLUE, #offGlue_method] @ r1<- glue->method 10083 mov r0, r10 @ r0<- self 10084 ldr r1, [r1, #offMethod_insns] @ r1<- method->insns 10085 mov r2, r9 @ r2<- exception 10086 sub r1, rPC, r1 @ r1<- pc - method->insns 10087 mov r1, r1, asr #1 @ r1<- offset in code units 10088 10089 /* call, r0 gets catchRelPc (a code-unit offset) */ 10090 bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp) 10091 10092 /* fix earlier stack overflow if necessary; may trash rFP */ 10093 ldrb r1, [r10, #offThread_stackOverflowed] 10094 cmp r1, #0 @ did we overflow earlier? 10095 beq 1f @ no, skip ahead 10096 mov rFP, r0 @ save relPc result in rFP 10097 mov r0, r10 @ r0<- self 10098 mov r1, r9 @ r1<- exception 10099 bl dvmCleanupStackOverflow @ call(self) 10100 mov r0, rFP @ restore result 101011: 10102 10103 /* update frame pointer and check result from dvmFindCatchBlock */ 10104 ldr rFP, [sp, #4] @ retrieve the updated rFP 10105 cmp r0, #0 @ is catchRelPc < 0? 10106 add sp, sp, #8 @ restore stack 10107 bmi .LnotCaughtLocally 10108 10109 /* adjust locals to match self->curFrame and updated PC */ 10110 SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area 10111 ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method 10112 str r1, [rGLUE, #offGlue_method] @ glue->method = new method 10113 ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz 10114 ldr r3, [r1, #offMethod_insns] @ r3<- method->insns 10115 ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex 10116 add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc 10117 str r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth... 10118 10119 /* release the tracked alloc on the exception */ 10120 mov r0, r9 @ r0<- exception 10121 mov r1, r10 @ r1<- self 10122 bl dvmReleaseTrackedAlloc @ release the exception 10123 10124 /* restore the exception if the handler wants it */ 10125 FETCH_INST() @ load rINST from rPC 10126 GET_INST_OPCODE(ip) @ extract opcode from rINST 10127 cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"? 10128 streq r9, [r10, #offThread_exception] @ yes, restore the exception 10129 GOTO_OPCODE(ip) @ jump to next instruction 10130 10131.LnotCaughtLocally: @ r9=exception, r10=self 10132 /* fix stack overflow if necessary */ 10133 ldrb r1, [r10, #offThread_stackOverflowed] 10134 cmp r1, #0 @ did we overflow earlier? 10135 movne r0, r10 @ if yes: r0<- self 10136 movne r1, r9 @ if yes: r1<- exception 10137 blne dvmCleanupStackOverflow @ if yes: call(self) 10138 10139 @ may want to show "not caught locally" debug messages here 10140#if DVM_SHOW_EXCEPTION >= 2 10141 /* call __android_log_print(prio, tag, format, ...) */ 10142 /* "Exception %s from %s:%d not caught locally" */ 10143 @ dvmLineNumFromPC(method, pc - method->insns) 10144 ldr r0, [rGLUE, #offGlue_method] 10145 ldr r1, [r0, #offMethod_insns] 10146 sub r1, rPC, r1 10147 asr r1, r1, #1 10148 bl dvmLineNumFromPC 10149 str r0, [sp, #-4]! 10150 @ dvmGetMethodSourceFile(method) 10151 ldr r0, [rGLUE, #offGlue_method] 10152 bl dvmGetMethodSourceFile 10153 str r0, [sp, #-4]! 10154 @ exception->clazz->descriptor 10155 ldr r3, [r9, #offObject_clazz] 10156 ldr r3, [r3, #offClassObject_descriptor] 10157 @ 10158 ldr r2, strExceptionNotCaughtLocally 10159 ldr r1, strLogTag 10160 mov r0, #3 @ LOG_DEBUG 10161 bl __android_log_print 10162#endif 10163 str r9, [r10, #offThread_exception] @ restore exception 10164 mov r0, r9 @ r0<- exception 10165 mov r1, r10 @ r1<- self 10166 bl dvmReleaseTrackedAlloc @ release the exception 10167 mov r1, #0 @ "want switch" = false 10168 b common_gotoBail @ bail out 10169 10170 10171 /* 10172 * Exception handling, calls through "glue code". 10173 */ 10174 .if 0 10175.LexceptionOld: 10176 SAVE_PC_FP_TO_GLUE() @ export state 10177 mov r0, rGLUE @ arg to function 10178 bl dvmMterp_exceptionThrown 10179 b common_resumeAfterGlueCall 10180 .endif 10181 10182 10183/* 10184 * After returning from a "glued" function, pull out the updated 10185 * values and start executing at the next instruction. 10186 */ 10187common_resumeAfterGlueCall: 10188 LOAD_PC_FP_FROM_GLUE() @ pull rPC and rFP out of glue 10189 FETCH_INST() @ load rINST from rPC 10190 GET_INST_OPCODE(ip) @ extract opcode from rINST 10191 GOTO_OPCODE(ip) @ jump to next instruction 10192 10193/* 10194 * Invalid array index. 10195 */ 10196common_errArrayIndex: 10197 EXPORT_PC() 10198 ldr r0, strArrayIndexException 10199 mov r1, #0 10200 bl dvmThrowException 10201 b common_exceptionThrown 10202 10203/* 10204 * Invalid array value. 10205 */ 10206common_errArrayStore: 10207 EXPORT_PC() 10208 ldr r0, strArrayStoreException 10209 mov r1, #0 10210 bl dvmThrowException 10211 b common_exceptionThrown 10212 10213/* 10214 * Integer divide or mod by zero. 10215 */ 10216common_errDivideByZero: 10217 EXPORT_PC() 10218 ldr r0, strArithmeticException 10219 ldr r1, strDivideByZero 10220 bl dvmThrowException 10221 b common_exceptionThrown 10222 10223/* 10224 * Attempt to allocate an array with a negative size. 10225 */ 10226common_errNegativeArraySize: 10227 EXPORT_PC() 10228 ldr r0, strNegativeArraySizeException 10229 mov r1, #0 10230 bl dvmThrowException 10231 b common_exceptionThrown 10232 10233/* 10234 * Invocation of a non-existent method. 10235 */ 10236common_errNoSuchMethod: 10237 EXPORT_PC() 10238 ldr r0, strNoSuchMethodError 10239 mov r1, #0 10240 bl dvmThrowException 10241 b common_exceptionThrown 10242 10243/* 10244 * We encountered a null object when we weren't expecting one. We 10245 * export the PC, throw a NullPointerException, and goto the exception 10246 * processing code. 10247 */ 10248common_errNullObject: 10249 EXPORT_PC() 10250 ldr r0, strNullPointerException 10251 mov r1, #0 10252 bl dvmThrowException 10253 b common_exceptionThrown 10254 10255/* 10256 * For debugging, cause an immediate fault. The source address will 10257 * be in lr (use a bl instruction to jump here). 10258 */ 10259common_abort: 10260 ldr pc, .LdeadFood 10261.LdeadFood: 10262 .word 0xdeadf00d 10263 10264/* 10265 * Spit out a "we were here", preserving all registers. (The attempt 10266 * to save ip won't work, but we need to save an even number of 10267 * registers for EABI 64-bit stack alignment.) 10268 */ 10269 .macro SQUEAK num 10270common_squeak\num: 10271 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10272 ldr r0, strSqueak 10273 mov r1, #\num 10274 bl printf 10275 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10276 bx lr 10277 .endm 10278 10279 SQUEAK 0 10280 SQUEAK 1 10281 SQUEAK 2 10282 SQUEAK 3 10283 SQUEAK 4 10284 SQUEAK 5 10285 10286/* 10287 * Spit out the number in r0, preserving registers. 10288 */ 10289common_printNum: 10290 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10291 mov r1, r0 10292 ldr r0, strSqueak 10293 bl printf 10294 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10295 bx lr 10296 10297/* 10298 * Print a newline, preserving registers. 10299 */ 10300common_printNewline: 10301 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10302 ldr r0, strNewline 10303 bl printf 10304 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10305 bx lr 10306 10307 /* 10308 * Print the 32-bit quantity in r0 as a hex value, preserving registers. 10309 */ 10310common_printHex: 10311 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10312 mov r1, r0 10313 ldr r0, strPrintHex 10314 bl printf 10315 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10316 bx lr 10317 10318/* 10319 * Print the 64-bit quantity in r0-r1, preserving registers. 10320 */ 10321common_printLong: 10322 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10323 mov r3, r1 10324 mov r2, r0 10325 ldr r0, strPrintLong 10326 bl printf 10327 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10328 bx lr 10329 10330/* 10331 * Print full method info. Pass the Method* in r0. Preserves regs. 10332 */ 10333common_printMethod: 10334 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10335 bl dvmMterpPrintMethod 10336 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10337 bx lr 10338 10339/* 10340 * Call a C helper function that dumps regs and possibly some 10341 * additional info. Requires the C function to be compiled in. 10342 */ 10343 .if 0 10344common_dumpRegs: 10345 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10346 bl dvmMterpDumpArmRegs 10347 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10348 bx lr 10349 .endif 10350 10351#if 0 10352/* 10353 * Experiment on VFP mode. 10354 * 10355 * uint32_t setFPSCR(uint32_t val, uint32_t mask) 10356 * 10357 * Updates the bits specified by "mask", setting them to the values in "val". 10358 */ 10359setFPSCR: 10360 and r0, r0, r1 @ make sure no stray bits are set 10361 fmrx r2, fpscr @ get VFP reg 10362 mvn r1, r1 @ bit-invert mask 10363 and r2, r2, r1 @ clear masked bits 10364 orr r2, r2, r0 @ set specified bits 10365 fmxr fpscr, r2 @ set VFP reg 10366 mov r0, r2 @ return new value 10367 bx lr 10368 10369 .align 2 10370 .global dvmConfigureFP 10371 .type dvmConfigureFP, %function 10372dvmConfigureFP: 10373 stmfd sp!, {ip, lr} 10374 /* 0x03000000 sets DN/FZ */ 10375 /* 0x00009f00 clears the six exception enable flags */ 10376 bl common_squeak0 10377 mov r0, #0x03000000 @ r0<- 0x03000000 10378 add r1, r0, #0x9f00 @ r1<- 0x03009f00 10379 bl setFPSCR 10380 ldmfd sp!, {ip, pc} 10381#endif 10382 10383 10384/* 10385 * String references, must be close to the code that uses them. 10386 */ 10387 .align 2 10388strArithmeticException: 10389 .word .LstrArithmeticException 10390strArrayIndexException: 10391 .word .LstrArrayIndexException 10392strArrayStoreException: 10393 .word .LstrArrayStoreException 10394strDivideByZero: 10395 .word .LstrDivideByZero 10396strNegativeArraySizeException: 10397 .word .LstrNegativeArraySizeException 10398strNoSuchMethodError: 10399 .word .LstrNoSuchMethodError 10400strNullPointerException: 10401 .word .LstrNullPointerException 10402 10403strLogTag: 10404 .word .LstrLogTag 10405strExceptionNotCaughtLocally: 10406 .word .LstrExceptionNotCaughtLocally 10407 10408strNewline: 10409 .word .LstrNewline 10410strSqueak: 10411 .word .LstrSqueak 10412strPrintHex: 10413 .word .LstrPrintHex 10414strPrintLong: 10415 .word .LstrPrintLong 10416 10417/* 10418 * Zero-terminated ASCII string data. 10419 * 10420 * On ARM we have two choices: do like gcc does, and LDR from a .word 10421 * with the address, or use an ADR pseudo-op to get the address 10422 * directly. ADR saves 4 bytes and an indirection, but it's using a 10423 * PC-relative addressing mode and hence has a limited range, which 10424 * makes it not work well with mergeable string sections. 10425 */ 10426 .section .rodata.str1.4,"aMS",%progbits,1 10427 10428.LstrBadEntryPoint: 10429 .asciz "Bad entry point %d\n" 10430.LstrArithmeticException: 10431 .asciz "Ljava/lang/ArithmeticException;" 10432.LstrArrayIndexException: 10433 .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;" 10434.LstrArrayStoreException: 10435 .asciz "Ljava/lang/ArrayStoreException;" 10436.LstrClassCastException: 10437 .asciz "Ljava/lang/ClassCastException;" 10438.LstrDivideByZero: 10439 .asciz "divide by zero" 10440.LstrFilledNewArrayNotImpl: 10441 .asciz "filled-new-array only implemented for objects and 'int'" 10442.LstrInternalError: 10443 .asciz "Ljava/lang/InternalError;" 10444.LstrInstantiationError: 10445 .asciz "Ljava/lang/InstantiationError;" 10446.LstrNegativeArraySizeException: 10447 .asciz "Ljava/lang/NegativeArraySizeException;" 10448.LstrNoSuchMethodError: 10449 .asciz "Ljava/lang/NoSuchMethodError;" 10450.LstrNullPointerException: 10451 .asciz "Ljava/lang/NullPointerException;" 10452 10453.LstrLogTag: 10454 .asciz "mterp" 10455.LstrExceptionNotCaughtLocally: 10456 .asciz "Exception %s from %s:%d not caught locally\n" 10457 10458.LstrNewline: 10459 .asciz "\n" 10460.LstrSqueak: 10461 .asciz "<%d>" 10462.LstrPrintHex: 10463 .asciz "<0x%x>" 10464.LstrPrintLong: 10465 .asciz "<%lld>" 10466 10467 10468