InterpAsm-armv4t.S revision 6e963e1cfbaeac377fed3ba8d5715c1dccfc1a57
/*
 * This file was generated automatically by gen-mterp.py for 'armv4t'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-q3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 */
#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
    ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh    rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]

/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6
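/*
 * Taken together, the fetch/dispatch macros implement a computed-goto
 * dispatch loop.  Roughly, in C terms (illustrative sketch only; "ibase"
 * stands for the handler base held in rIBASE, and every handler below is
 * padded to 64 bytes by ".balign 64", hence the "lsl #6"):
 *
 *     uint16_t inst = *pc;                  // FETCH_INST / FETCH_ADVANCE_INST
 *     unsigned opcode = inst & 0xff;        // GET_INST_OPCODE
 *     void* handler = ibase + opcode * 64;  // GOTO_OPCODE
 *     goto *handler;                        // (GCC computed goto)
 */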
/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

#if defined(WITH_JIT)
#define GET_JIT_ENABLED(_reg)       ldr     _reg,[rGLUE,#offGlue_jitEnabled]
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
    add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"

/*
 * Power of 2 width in bits of the hash table size.
 * for ex: 9 -> 512, 10 -> 1024, etc.
 */
#define JIT_PROF_TAB_WIDTH          12
#define JIT_PROF_TAB_LSHIFT         (32 - JIT_PROF_TAB_WIDTH)
#define JIT_PROF_TAB_THRESH_RESET   255

/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm


/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.  From this it figures out how to restore
 * the registers, including PC, for the previous stack frame.  If gdb
 * sees a non-function label, it stops scanning, so either we need to
 * have nothing but assembler-local labels between the entry point and
 * the break, or we need to fake it out.
 *
 * When this is defined, we add some stuff to make gdb less confused.
 */
#define ASSIST_DEBUGGER 1

    .text
    .align  2
    .global dvmMterpStdRun
    .type   dvmMterpStdRun, %function

/*
 * On entry:
 *  r0  MterpGlue* glue
 *
 * This function returns a boolean "changeInterp" value.  The return comes
 * via a call to dvmMterpStdBail().
 */
dvmMterpStdRun:
#define MTERP_ENTRY1 \
    .save {r4-r10,fp,lr}; \
    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
#define MTERP_ENTRY2 \
    .pad    #4; \
    sub     sp, sp, #4                  @ align 64

    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2

    /* save stack pointer, add magic word for debuggerd */
    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return

    /* set up "named" registers, figure out entry point */
    mov     rGLUE, r0                   @ set rGLUE
    ldrb    r1, [r0, #offGlue_entryPoint]   @ InterpEntry enum is char
    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
    cmp     r1, #kInterpEntryInstr      @ usual case?
    bne     .Lnot_instr                 @ no, handle it

#if defined(WITH_JIT)
.Lno_singleStep:
    /* Entry is always a possible trace start */
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    /* start executing the instruction at rPC */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.Lnot_instr:
    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
    beq     common_returnFromMethod

.Lnot_return:
    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
    beq     common_exceptionThrown

#if defined(WITH_JIT)
.Lnot_throw:
    ldr     r0,[rGLUE, #offGlue_jitResume]
    ldr     r2,[rGLUE, #offGlue_jitResumePC]
    cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
    bne     .Lbad_arg
    cmp     rPC,r2
    bne     .Lno_singleStep             @ must have branched, don't resume
    mov     r1, #kInterpEntryInstr
    strb    r1, [rGLUE, #offGlue_entryPoint]
    ldr     rINST, .LdvmCompilerTemplate
    bx      r0                          @ re-enter the translation
.LdvmCompilerTemplate:
    .word   dvmCompilerTemplateStart
#endif

.Lbad_arg:
    ldr     r0, strBadEntryPoint
    @ r1 holds value of entryPoint
    bl      printf
    bl      dvmAbort
    .fnend


    .global dvmMterpStdBail
    .type   dvmMterpStdBail, %function

/*
 * Restore the stack pointer and PC from the save point established on entry.
 * This is essentially the same as a longjmp, but should be cheaper.  The
 * last instruction causes us to return to whoever called dvmMterpStdRun.
 *
 * We pushed some registers on the stack in dvmMterpStdRun, then saved
 * SP and LR.  Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  r0  MterpGlue* glue
 *  r1  bool changeInterp
 */
dvmMterpStdBail:
    ldr     sp, [r0, #offGlue_bailPtr]  @ sp<- saved SP
    mov     r0, r1                      @ return the changeInterp value
    add     sp, sp, #4                  @ un-align 64
    LDMFD_PC "r4-r10,fp"                @ restore 9 regs and return

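/*
 * dvmMterpStdRun and dvmMterpStdBail cooperate much like setjmp/longjmp.
 * Roughly, in C terms (illustrative sketch only; "bailPtr" corresponds to
 * the offGlue_bailPtr slot used above):
 *
 *     bool dvmMterpStdRun(MterpGlue* glue) {
 *         glue->bailPtr = sp;          // remember SP of this invocation
 *         interpret();                 // runs until some handler bails out
 *     }
 *     void dvmMterpStdBail(MterpGlue* glue, bool changeInterp) {
 *         sp = glue->bailPtr;          // pop back to dvmMterpStdRun's frame
 *         restore_saved_regs();        // ldmfd sp!, {r4-r10,fp,pc}
 *         // ...which "returns" changeInterp from dvmMterpStdRun
 *     }
 */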
/*
 * String references.
 */
strBadEntryPoint:
    .word   .LstrBadEntryPoint


    .global dvmAsmInstructionStart
    .type   dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
    .text

/* ------------------------------ */
    .balign 64
.L_OP_NOP: /* 0x00 */
/* File: armv5te/OP_NOP.S */
    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    .type   dalvik_inst, %function
dalvik_inst:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
    .fnend
#endif


/* ------------------------------ */
    .balign 64
.L_OP_MOVE: /* 0x01 */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_FROM16: /* 0x02 */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_16: /* 0x03 */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE: /* 0x04 */
/* File: armv5te/OP_MOVE_WIDE.S */
    /* move-wide vA, vB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r2, r2, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

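/*
 * In C terms the move family above amounts to plain Dalvik-register copies
 * (illustrative sketch only; "fp" is the frame's register array, reached
 * through rFP):
 *
 *     uint32_t* fp;                       // Dalvik registers of this frame
 *     fp[A] = fp[B];                      // move vA, vB (4-bit A/B fields
 *                                         //  unpacked from rINST)
 *     memcpy(&fp[A], &fp[B], 8);          // move-wide: copies a register
 *                                         //  pair via ldmia/stmia
 */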
"move v6,v7" or "move v7,v6" */ 491 FETCH(r3, 1) @ r3<- BBBB 492 mov r2, rINST, lsr #8 @ r2<- AA 493 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 494 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 495 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 496 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 497 GET_INST_OPCODE(ip) @ extract opcode from rINST 498 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 499 GOTO_OPCODE(ip) @ jump to next instruction 500 501 502/* ------------------------------ */ 503 .balign 64 504.L_OP_MOVE_WIDE_16: /* 0x06 */ 505/* File: armv5te/OP_MOVE_WIDE_16.S */ 506 /* move-wide/16 vAAAA, vBBBB */ 507 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 508 FETCH(r3, 2) @ r3<- BBBB 509 FETCH(r2, 1) @ r2<- AAAA 510 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 511 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA] 512 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 513 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 514 GET_INST_OPCODE(ip) @ extract opcode from rINST 515 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1 516 GOTO_OPCODE(ip) @ jump to next instruction 517 518 519/* ------------------------------ */ 520 .balign 64 521.L_OP_MOVE_OBJECT: /* 0x07 */ 522/* File: armv5te/OP_MOVE_OBJECT.S */ 523/* File: armv5te/OP_MOVE.S */ 524 /* for move, move-object, long-to-int */ 525 /* op vA, vB */ 526 mov r1, rINST, lsr #12 @ r1<- B from 15:12 527 mov r0, rINST, lsr #8 @ r0<- A from 11:8 528 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 529 GET_VREG(r2, r1) @ r2<- fp[B] 530 and r0, r0, #15 531 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 532 SET_VREG(r2, r0) @ fp[A]<- r2 533 GOTO_OPCODE(ip) @ execute next instruction 534 535 536 537/* ------------------------------ */ 538 .balign 64 539.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */ 540/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */ 541/* File: armv5te/OP_MOVE_FROM16.S */ 542 /* for: move/from16, move-object/from16 */ 543 /* op vAA, vBBBB */ 544 FETCH(r1, 1) @ r1<- BBBB 545 mov r0, rINST, lsr #8 @ r0<- AA 546 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 547 GET_VREG(r2, r1) @ r2<- fp[BBBB] 548 GET_INST_OPCODE(ip) @ extract opcode from rINST 549 SET_VREG(r2, r0) @ fp[AA]<- r2 550 GOTO_OPCODE(ip) @ jump to next instruction 551 552 553 554/* ------------------------------ */ 555 .balign 64 556.L_OP_MOVE_OBJECT_16: /* 0x09 */ 557/* File: armv5te/OP_MOVE_OBJECT_16.S */ 558/* File: armv5te/OP_MOVE_16.S */ 559 /* for: move/16, move-object/16 */ 560 /* op vAAAA, vBBBB */ 561 FETCH(r1, 2) @ r1<- BBBB 562 FETCH(r0, 1) @ r0<- AAAA 563 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 564 GET_VREG(r2, r1) @ r2<- fp[BBBB] 565 GET_INST_OPCODE(ip) @ extract opcode from rINST 566 SET_VREG(r2, r0) @ fp[AAAA]<- r2 567 GOTO_OPCODE(ip) @ jump to next instruction 568 569 570 571/* ------------------------------ */ 572 .balign 64 573.L_OP_MOVE_RESULT: /* 0x0a */ 574/* File: armv5te/OP_MOVE_RESULT.S */ 575 /* for: move-result, move-result-object */ 576 /* op vAA */ 577 mov r2, rINST, lsr #8 @ r2<- AA 578 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 579 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 580 GET_INST_OPCODE(ip) @ extract opcode from rINST 581 SET_VREG(r0, r2) @ fp[AA]<- r0 582 GOTO_OPCODE(ip) @ jump to next instruction 583 584 585/* ------------------------------ */ 586 .balign 64 587.L_OP_MOVE_RESULT_WIDE: /* 0x0b */ 588/* File: armv5te/OP_MOVE_RESULT_WIDE.S */ 589 /* move-result-wide vAA */ 590 mov r2, rINST, lsr #8 @ r2<- AA 591 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 592 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 593 ldmia r3, {r0-r1} @ r0/r1<- retval.j 594 FETCH_ADVANCE_INST(1) @ 
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_MOVE_EXCEPTION: /* 0x0d */
/* File: armv5te/OP_MOVE_EXCEPTION.S */
    /* move-exception vAA */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    mov     r2, rINST, lsr #8           @ r2<- AA
    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
    mov     r1, #0                      @ r1<- 0
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_RETURN_VOID: /* 0x0e */
/* File: armv5te/OP_RETURN_VOID.S */
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_RETURN: /* 0x0f */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval]    @ retval.i <- vAA
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_RETURN_WIDE: /* 0x10 */
/* File: armv5te/OP_RETURN_WIDE.S */
    /*
     * Return a 64-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     */
    /* return-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
    stmia   r3, {r0-r1}                 @ retval<- r0/r1
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_RETURN_OBJECT: /* 0x11 */
/* File: armv5te/OP_RETURN_OBJECT.S */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval]    @ retval.i <- vAA
    b       common_returnFromMethod



/* ------------------------------ */
    .balign 64
.L_OP_CONST_4: /* 0x12 */
/* File: armv5te/OP_CONST_4.S */
    /* const/4 vA, #+B */
    mov     r1, rINST, lsl #16          @ r1<- Bxxx0000
    mov     r0, rINST, lsr #8           @ r0<- A+
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r1, asr #28             @ r1<- sssssssB (sign-extended)
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r1, r0)                    @ fp[A]<- r1
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_16: /* 0x13 */
/* File: armv5te/OP_CONST_16.S */
    /* const/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST: /* 0x14 */
/* File: armv5te/OP_CONST.S */
    /* const vAA, #+BBBBbbbb */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_HIGH16: /* 0x15 */
/* File: armv5te/OP_CONST_HIGH16.S */
    /* const/high16 vAA, #+BBBB0000 */
    FETCH(r0, 1)                        @ r0<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, r0, lsl #16             @ r0<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_16: /* 0x16 */
/* File: armv5te/OP_CONST_WIDE_16.S */
    /* const-wide/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r1, r0, asr #31             @ r1<- ssssssss
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_32: /* 0x17 */
/* File: armv5te/OP_CONST_WIDE_32.S */
    /* const-wide/32 vAA, #+BBBBbbbb */
    FETCH(r0, 1)                        @ r0<- 0000bbbb (low)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_S(r2, 2)                      @ r2<- ssssBBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    mov     r1, r0, asr #31             @ r1<- ssssssss
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

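/*
 * The const-wide variants assemble a 64-bit literal out of 16-bit code
 * units.  Roughly, in C terms (illustrative sketch only; "fetch(n)" stands
 * for the FETCH(_reg, n) macro, i.e. the code unit at rPC + n*2):
 *
 *     // const-wide/16:  value = (int64_t)(int16_t)fetch(1);
 *     // const-wide/32:  value = (int64_t)(int32_t)(fetch(1) | fetch(2) << 16);
 *     // const-wide:     lo = fetch(1) | fetch(2) << 16;
 *     //                 hi = fetch(3) | fetch(4) << 16;
 */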
/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE: /* 0x18 */
/* File: armv5te/OP_CONST_WIDE.S */
    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (low middle)
    FETCH(r2, 3)                        @ r2<- hhhh (high middle)
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
    FETCH(r3, 4)                        @ r3<- HHHH (high)
    mov     r9, rINST, lsr #8           @ r9<- AA
    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
    /* const-wide/high16 vAA, #+BBBB000000000000 */
    FETCH(r1, 1)                        @ r1<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, #0                      @ r0<- 00000000
    mov     r1, r1, lsl #16             @ r1<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING: /* 0x1a */
/* File: armv5te/OP_CONST_STRING.S */
    /* const/string vAA, String@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_resolve
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING_JUMBO: /* 0x1b */
/* File: armv5te/OP_CONST_STRING_JUMBO.S */
    /* const/string vAA, String@BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0
    beq     .LOP_CONST_STRING_JUMBO_resolve
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_CLASS: /* 0x1c */
/* File: armv5te/OP_CONST_CLASS.S */
    /* const/class vAA, Class@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- dvmDex->pResClasses
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_CLASS_resolve
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

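/*
 * const/string and const/class first probe the per-DEX resolved-object
 * cache and only fall out to the slow resolve path on a miss.  Roughly,
 * in C terms (illustrative sketch only):
 *
 *     Object* obj = dvmDex->pResStrings[BBBB];   // or pResClasses[BBBB]
 *     if (obj == NULL)
 *         goto resolve;                          // out-of-line slow path
 *     fp[AA] = (uint32_t)obj;                    // fast path: cached hit
 */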
/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_ENTER: /* 0x1d */
/* File: armv5te/OP_MONITOR_ENTER.S */
    /*
     * Synchronize on an object.
     */
    /* monitor-enter vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
    beq     common_errNullObject        @ null object, throw an exception
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      dvmLockObject               @ call(self, obj)
#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r1, [r0, #offThread_exception]  @ check for exception
    cmp     r1, #0
    bne     common_exceptionThrown      @ exception raised, bail out
#endif
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_EXIT: /* 0x1e */
/* File: armv5te/OP_MONITOR_EXIT.S */
    /*
     * Unlock an object.
     *
     * Exceptions that occur when unlocking a monitor need to appear as
     * if they happened at the following instruction.  See the Dalvik
     * instruction spec.
     */
    /* monitor-exit vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    EXPORT_PC()                         @ before fetch: export the PC
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, exception is pending
    FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CHECK_CAST: /* 0x1f */
/* File: armv5te/OP_CHECK_CAST.S */
    /*
     * Check to see if a cast from one class to another is allowed.
     */
    /* check-cast vAA, class@BBBB */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r2, 1)                        @ r2<- BBBB
    GET_VREG(r9, r3)                    @ r9<- object
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
    cmp     r9, #0                      @ is object null?
    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
    beq     .LOP_CHECK_CAST_okay        @ null obj, cast always succeeds
    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_CHECK_CAST_resolve     @ not resolved, do it now
.LOP_CHECK_CAST_resolved:
    cmp     r0, r1                      @ same class (trivial success)?
    bne     .LOP_CHECK_CAST_fullcheck   @ no, do full check
.LOP_CHECK_CAST_okay:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

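/*
 * The inline portion of check-cast (and of instance-of, below) only handles
 * the cheap cases.  Roughly, in C terms (illustrative sketch only):
 *
 *     if (obj == NULL) goto okay;              // null always passes the cast
 *     ClassObject* want = pResClasses[BBBB];
 *     if (want == NULL) goto resolve;          // slow path: resolve the class
 *     if (obj->clazz == want) goto okay;       // trivial success
 *     goto fullcheck;                          // out-of-line subtype test
 */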
/* ------------------------------ */
    .balign 64
.L_OP_INSTANCE_OF: /* 0x20 */
/* File: armv5te/OP_INSTANCE_OF.S */
    /*
     * Check to see if an object reference is an instance of a class.
     *
     * Most common situation is a non-null object, being compared against
     * an already-resolved class.
     */
    /* instance-of vA, vB, class@CCCC */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    and     r9, r9, #15                 @ r9<- A
    cmp     r0, #0                      @ is object null?
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
    beq     .LOP_INSTANCE_OF_store      @ null obj, not an instance, store r0
    FETCH(r3, 1)                        @ r3<- CCCC
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_INSTANCE_OF_resolve    @ not resolved, do it now
.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
    cmp     r0, r1                      @ same class (trivial success)?
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

/* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv5te/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    mov     r1, rINST, lsr #12          @ r1<- B
    mov     r2, rINST, lsr #8           @ r2<- A+
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    and     r2, r2, #15                 @ r2<- A
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vB<- length
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved: @ r0=class
    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

/* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue    @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


/* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /* fill-array-data vAA, +BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
    EXPORT_PC();
    bl      dvmInterpHandleFillArrayData    @ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    mov     r9, r9, lsl #1              @ r9<- byte offset
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

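/*
 * All goto variants follow the same pattern.  Roughly, in C terms
 * (illustrative sketch only):
 *
 *     int32_t off = sign_extend(branch_field);    // in 16-bit code units
 *     int32_t byteOff = off * 2;                  // code units -> bytes
 *     if (byteOff < 0)
 *         goto common_backwardBranch;             // periodic suspend checks
 *     pc = (uint16_t*)((uint8_t*)pc + byteOff);   // FETCH_ADVANCE_INST_RB
 */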
/* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

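/*
 * The condition-flag juggling after __aeabi_cfcmple corresponds roughly to
 * this C (illustrative sketch only; "gt_or_nan" names the out-of-line tail
 * that issues the second compare):
 *
 *     int res;
 *     // the call compares x <= y and reports the result in the flags
 *     if (x < y)        res = -1;              // "cc": carry clear
 *     else if (x == y)  res =  0;              // "eq"
 *     else              res = gt_or_nan(x, y); // "hi": greater or NaN; the
 *                                              // second compare picks +1 or
 *                                              // the opcode's NaN bias
 */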
/* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: armv5te/OP_CMPG_FLOAT.S */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: armv5te/OP_CMPG_DOUBLE.S */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

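/*
 * The multi-compare sequence above is equivalent to roughly this C
 * (illustrative sketch only):
 *
 *     // x and y are int64 values held as {lo, hi} 32-bit pairs
 *     if ((int32_t)x_hi < (int32_t)y_hi)    return -1;  // signed high words
 *     if ((int32_t)x_hi > (int32_t)y_hi)    return  1;
 *     if ((uint32_t)x_lo > (uint32_t)y_lo)  return  1;  // unsigned low words
 *     if ((uint32_t)x_lo != (uint32_t)y_lo) return -1;
 *     return 0;                                         // all 64 bits equal
 */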
/* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv5te/OP_IF_EQ.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv5te/OP_IF_NE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: armv5te/OP_IF_LT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: armv5te/OP_IF_GE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: armv5te/OP_IF_GT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
1684 * 1685 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1686 */ 1687 /* if-cmp vA, vB, +CCCC */ 1688 mov r0, rINST, lsr #8 @ r0<- A+ 1689 mov r1, rINST, lsr #12 @ r1<- B 1690 and r0, r0, #15 1691 GET_VREG(r3, r1) @ r3<- vB 1692 GET_VREG(r2, r0) @ r2<- vA 1693 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1694 cmp r2, r3 @ compare (vA, vB) 1695 ble 1f @ branch to 1 if comparison failed 1696 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1697 movs r9, r9, asl #1 @ convert to bytes, check sign 1698 bmi common_backwardBranch @ yes, do periodic checks 16991: 1700#if defined(WITH_JIT) 1701 GET_JIT_PROF_TABLE(r0) 1702 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1703 b common_testUpdateProfile 1704#else 1705 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1706 GET_INST_OPCODE(ip) @ extract opcode from rINST 1707 GOTO_OPCODE(ip) @ jump to next instruction 1708#endif 1709 1710 1711 1712/* ------------------------------ */ 1713 .balign 64 1714.L_OP_IF_LE: /* 0x37 */ 1715/* File: armv5te/OP_IF_LE.S */ 1716/* File: armv5te/bincmp.S */ 1717 /* 1718 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1719 * fragment that specifies the *reverse* comparison to perform, e.g. 1720 * for "if-le" you would use "gt". 1721 * 1722 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1723 */ 1724 /* if-cmp vA, vB, +CCCC */ 1725 mov r0, rINST, lsr #8 @ r0<- A+ 1726 mov r1, rINST, lsr #12 @ r1<- B 1727 and r0, r0, #15 1728 GET_VREG(r3, r1) @ r3<- vB 1729 GET_VREG(r2, r0) @ r2<- vA 1730 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1731 cmp r2, r3 @ compare (vA, vB) 1732 bgt 1f @ branch to 1 if comparison failed 1733 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1734 movs r9, r9, asl #1 @ convert to bytes, check sign 1735 bmi common_backwardBranch @ yes, do periodic checks 17361: 1737#if defined(WITH_JIT) 1738 GET_JIT_PROF_TABLE(r0) 1739 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1740 b common_testUpdateProfile 1741#else 1742 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1743 GET_INST_OPCODE(ip) @ extract opcode from rINST 1744 GOTO_OPCODE(ip) @ jump to next instruction 1745#endif 1746 1747 1748 1749/* ------------------------------ */ 1750 .balign 64 1751.L_OP_IF_EQZ: /* 0x38 */ 1752/* File: armv5te/OP_IF_EQZ.S */ 1753/* File: armv5te/zcmp.S */ 1754 /* 1755 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1756 * fragment that specifies the *reverse* comparison to perform, e.g. 1757 * for "if-le" you would use "gt". 
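 *
 * Same shape as bincmp above, but the second operand is the constant
 * zero (sketch only):
 *
 *     if (vAA revcmp 0)
 *         rPC += 2 * 2;            // not taken
 *     else
 *         rPC += (short)BBBB * 2;  // taken; negative => backward branch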
1758 * 1759 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1760 */ 1761 /* if-cmp vAA, +BBBB */ 1762 mov r0, rINST, lsr #8 @ r0<- AA 1763 GET_VREG(r2, r0) @ r2<- vAA 1764 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1765 cmp r2, #0 @ compare (vA, 0) 1766 bne 1f @ branch to 1 if comparison failed 1767 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1768 movs r9, r9, asl #1 @ convert to bytes, check sign 1769 bmi common_backwardBranch @ backward branch, do periodic checks 17701: 1771#if defined(WITH_JIT) 1772 GET_JIT_PROF_TABLE(r0) 1773 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1774 cmp r0,#0 1775 bne common_updateProfile 1776 GET_INST_OPCODE(ip) @ extract opcode from rINST 1777 GOTO_OPCODE(ip) @ jump to next instruction 1778#else 1779 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1780 GET_INST_OPCODE(ip) @ extract opcode from rINST 1781 GOTO_OPCODE(ip) @ jump to next instruction 1782#endif 1783 1784 1785 1786/* ------------------------------ */ 1787 .balign 64 1788.L_OP_IF_NEZ: /* 0x39 */ 1789/* File: armv5te/OP_IF_NEZ.S */ 1790/* File: armv5te/zcmp.S */ 1791 /* 1792 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1793 * fragment that specifies the *reverse* comparison to perform, e.g. 1794 * for "if-le" you would use "gt". 1795 * 1796 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1797 */ 1798 /* if-cmp vAA, +BBBB */ 1799 mov r0, rINST, lsr #8 @ r0<- AA 1800 GET_VREG(r2, r0) @ r2<- vAA 1801 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1802 cmp r2, #0 @ compare (vA, 0) 1803 beq 1f @ branch to 1 if comparison failed 1804 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1805 movs r9, r9, asl #1 @ convert to bytes, check sign 1806 bmi common_backwardBranch @ backward branch, do periodic checks 18071: 1808#if defined(WITH_JIT) 1809 GET_JIT_PROF_TABLE(r0) 1810 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1811 cmp r0,#0 1812 bne common_updateProfile 1813 GET_INST_OPCODE(ip) @ extract opcode from rINST 1814 GOTO_OPCODE(ip) @ jump to next instruction 1815#else 1816 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1817 GET_INST_OPCODE(ip) @ extract opcode from rINST 1818 GOTO_OPCODE(ip) @ jump to next instruction 1819#endif 1820 1821 1822 1823/* ------------------------------ */ 1824 .balign 64 1825.L_OP_IF_LTZ: /* 0x3a */ 1826/* File: armv5te/OP_IF_LTZ.S */ 1827/* File: armv5te/zcmp.S */ 1828 /* 1829 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1830 * fragment that specifies the *reverse* comparison to perform, e.g. 1831 * for "if-le" you would use "gt". 
1832 * 1833 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1834 */ 1835 /* if-cmp vAA, +BBBB */ 1836 mov r0, rINST, lsr #8 @ r0<- AA 1837 GET_VREG(r2, r0) @ r2<- vAA 1838 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1839 cmp r2, #0 @ compare (vA, 0) 1840 bge 1f @ branch to 1 if comparison failed 1841 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1842 movs r9, r9, asl #1 @ convert to bytes, check sign 1843 bmi common_backwardBranch @ backward branch, do periodic checks 18441: 1845#if defined(WITH_JIT) 1846 GET_JIT_PROF_TABLE(r0) 1847 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1848 cmp r0,#0 1849 bne common_updateProfile 1850 GET_INST_OPCODE(ip) @ extract opcode from rINST 1851 GOTO_OPCODE(ip) @ jump to next instruction 1852#else 1853 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1854 GET_INST_OPCODE(ip) @ extract opcode from rINST 1855 GOTO_OPCODE(ip) @ jump to next instruction 1856#endif 1857 1858 1859 1860/* ------------------------------ */ 1861 .balign 64 1862.L_OP_IF_GEZ: /* 0x3b */ 1863/* File: armv5te/OP_IF_GEZ.S */ 1864/* File: armv5te/zcmp.S */ 1865 /* 1866 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1867 * fragment that specifies the *reverse* comparison to perform, e.g. 1868 * for "if-le" you would use "gt". 1869 * 1870 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1871 */ 1872 /* if-cmp vAA, +BBBB */ 1873 mov r0, rINST, lsr #8 @ r0<- AA 1874 GET_VREG(r2, r0) @ r2<- vAA 1875 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1876 cmp r2, #0 @ compare (vA, 0) 1877 blt 1f @ branch to 1 if comparison failed 1878 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1879 movs r9, r9, asl #1 @ convert to bytes, check sign 1880 bmi common_backwardBranch @ backward branch, do periodic checks 18811: 1882#if defined(WITH_JIT) 1883 GET_JIT_PROF_TABLE(r0) 1884 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1885 cmp r0,#0 1886 bne common_updateProfile 1887 GET_INST_OPCODE(ip) @ extract opcode from rINST 1888 GOTO_OPCODE(ip) @ jump to next instruction 1889#else 1890 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1891 GET_INST_OPCODE(ip) @ extract opcode from rINST 1892 GOTO_OPCODE(ip) @ jump to next instruction 1893#endif 1894 1895 1896 1897/* ------------------------------ */ 1898 .balign 64 1899.L_OP_IF_GTZ: /* 0x3c */ 1900/* File: armv5te/OP_IF_GTZ.S */ 1901/* File: armv5te/zcmp.S */ 1902 /* 1903 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1904 * fragment that specifies the *reverse* comparison to perform, e.g. 1905 * for "if-le" you would use "gt". 
1906 * 1907 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1908 */ 1909 /* if-cmp vAA, +BBBB */ 1910 mov r0, rINST, lsr #8 @ r0<- AA 1911 GET_VREG(r2, r0) @ r2<- vAA 1912 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1913 cmp r2, #0 @ compare (vA, 0) 1914 ble 1f @ branch to 1 if comparison failed 1915 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1916 movs r9, r9, asl #1 @ convert to bytes, check sign 1917 bmi common_backwardBranch @ backward branch, do periodic checks 19181: 1919#if defined(WITH_JIT) 1920 GET_JIT_PROF_TABLE(r0) 1921 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1922 cmp r0,#0 1923 bne common_updateProfile 1924 GET_INST_OPCODE(ip) @ extract opcode from rINST 1925 GOTO_OPCODE(ip) @ jump to next instruction 1926#else 1927 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1928 GET_INST_OPCODE(ip) @ extract opcode from rINST 1929 GOTO_OPCODE(ip) @ jump to next instruction 1930#endif 1931 1932 1933 1934/* ------------------------------ */ 1935 .balign 64 1936.L_OP_IF_LEZ: /* 0x3d */ 1937/* File: armv5te/OP_IF_LEZ.S */ 1938/* File: armv5te/zcmp.S */ 1939 /* 1940 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1941 * fragment that specifies the *reverse* comparison to perform, e.g. 1942 * for "if-le" you would use "gt". 1943 * 1944 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1945 */ 1946 /* if-cmp vAA, +BBBB */ 1947 mov r0, rINST, lsr #8 @ r0<- AA 1948 GET_VREG(r2, r0) @ r2<- vAA 1949 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1950 cmp r2, #0 @ compare (vA, 0) 1951 bgt 1f @ branch to 1 if comparison failed 1952 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1953 movs r9, r9, asl #1 @ convert to bytes, check sign 1954 bmi common_backwardBranch @ backward branch, do periodic checks 19551: 1956#if defined(WITH_JIT) 1957 GET_JIT_PROF_TABLE(r0) 1958 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1959 cmp r0,#0 1960 bne common_updateProfile 1961 GET_INST_OPCODE(ip) @ extract opcode from rINST 1962 GOTO_OPCODE(ip) @ jump to next instruction 1963#else 1964 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1965 GET_INST_OPCODE(ip) @ extract opcode from rINST 1966 GOTO_OPCODE(ip) @ jump to next instruction 1967#endif 1968 1969 1970 1971/* ------------------------------ */ 1972 .balign 64 1973.L_OP_UNUSED_3E: /* 0x3e */ 1974/* File: armv5te/OP_UNUSED_3E.S */ 1975/* File: armv5te/unused.S */ 1976 bl common_abort 1977 1978 1979 1980/* ------------------------------ */ 1981 .balign 64 1982.L_OP_UNUSED_3F: /* 0x3f */ 1983/* File: armv5te/OP_UNUSED_3F.S */ 1984/* File: armv5te/unused.S */ 1985 bl common_abort 1986 1987 1988 1989/* ------------------------------ */ 1990 .balign 64 1991.L_OP_UNUSED_40: /* 0x40 */ 1992/* File: armv5te/OP_UNUSED_40.S */ 1993/* File: armv5te/unused.S */ 1994 bl common_abort 1995 1996 1997 1998/* ------------------------------ */ 1999 .balign 64 2000.L_OP_UNUSED_41: /* 0x41 */ 2001/* File: armv5te/OP_UNUSED_41.S */ 2002/* File: armv5te/unused.S */ 2003 bl common_abort 2004 2005 2006 2007/* ------------------------------ */ 2008 .balign 64 2009.L_OP_UNUSED_42: /* 0x42 */ 2010/* File: armv5te/OP_UNUSED_42.S */ 2011/* File: armv5te/unused.S */ 2012 bl common_abort 2013 2014 2015 2016/* ------------------------------ */ 2017 .balign 64 2018.L_OP_UNUSED_43: /* 0x43 */ 2019/* File: armv5te/OP_UNUSED_43.S */ 2020/* File: armv5te/unused.S */ 2021 bl common_abort 2022 2023 2024 2025/* ------------------------------ */ 2026 .balign 64 2027.L_OP_AGET: /* 0x44 */ 2028/* File: armv5te/OP_AGET.S */ 2029 /* 2030 * 
Array get, 32 bits or less. vAA <- vBB[vCC]. 2031 * 2032 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2033 * instructions. We use a pair of FETCH_Bs instead. 2034 * 2035 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2036 */ 2037 /* op vAA, vBB, vCC */ 2038 FETCH_B(r2, 1, 0) @ r2<- BB 2039 mov r9, rINST, lsr #8 @ r9<- AA 2040 FETCH_B(r3, 1, 1) @ r3<- CC 2041 GET_VREG(r0, r2) @ r0<- vBB (array object) 2042 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2043 cmp r0, #0 @ null array object? 2044 beq common_errNullObject @ yes, bail 2045 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2046 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2047 cmp r1, r3 @ compare unsigned index, length 2048 bcs common_errArrayIndex @ index >= length, bail 2049 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2050 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2051 GET_INST_OPCODE(ip) @ extract opcode from rINST 2052 SET_VREG(r2, r9) @ vAA<- r2 2053 GOTO_OPCODE(ip) @ jump to next instruction 2054 2055 2056/* ------------------------------ */ 2057 .balign 64 2058.L_OP_AGET_WIDE: /* 0x45 */ 2059/* File: armv4t/OP_AGET_WIDE.S */ 2060 /* 2061 * Array get, 64 bits. vAA <- vBB[vCC]. 2062 * 2063 * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD. 2064 */ 2065 /* aget-wide vAA, vBB, vCC */ 2066 FETCH(r0, 1) @ r0<- CCBB 2067 mov r9, rINST, lsr #8 @ r9<- AA 2068 and r2, r0, #255 @ r2<- BB 2069 mov r3, r0, lsr #8 @ r3<- CC 2070 GET_VREG(r0, r2) @ r0<- vBB (array object) 2071 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2072 cmp r0, #0 @ null array object? 2073 beq common_errNullObject @ yes, bail 2074 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2075 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2076 cmp r1, r3 @ compare unsigned index, length 2077 bcc .LOP_AGET_WIDE_finish @ okay, continue below 2078 b common_errArrayIndex @ index >= length, bail 2079 @ May want to swap the order of these two branches depending on how the 2080 @ branch prediction (if any) handles conditional forward branches vs. 2081 @ unconditional forward branches. 2082 2083/* ------------------------------ */ 2084 .balign 64 2085.L_OP_AGET_OBJECT: /* 0x46 */ 2086/* File: armv5te/OP_AGET_OBJECT.S */ 2087/* File: armv5te/OP_AGET.S */ 2088 /* 2089 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2090 * 2091 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2092 * instructions. We use a pair of FETCH_Bs instead. 2093 * 2094 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2095 */ 2096 /* op vAA, vBB, vCC */ 2097 FETCH_B(r2, 1, 0) @ r2<- BB 2098 mov r9, rINST, lsr #8 @ r9<- AA 2099 FETCH_B(r3, 1, 1) @ r3<- CC 2100 GET_VREG(r0, r2) @ r0<- vBB (array object) 2101 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2102 cmp r0, #0 @ null array object? 
2103 beq common_errNullObject @ yes, bail 2104 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2105 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2106 cmp r1, r3 @ compare unsigned index, length 2107 bcs common_errArrayIndex @ index >= length, bail 2108 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2109 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2110 GET_INST_OPCODE(ip) @ extract opcode from rINST 2111 SET_VREG(r2, r9) @ vAA<- r2 2112 GOTO_OPCODE(ip) @ jump to next instruction 2113 2114 2115 2116/* ------------------------------ */ 2117 .balign 64 2118.L_OP_AGET_BOOLEAN: /* 0x47 */ 2119/* File: armv5te/OP_AGET_BOOLEAN.S */ 2120/* File: armv5te/OP_AGET.S */ 2121 /* 2122 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2123 * 2124 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2125 * instructions. We use a pair of FETCH_Bs instead. 2126 * 2127 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2128 */ 2129 /* op vAA, vBB, vCC */ 2130 FETCH_B(r2, 1, 0) @ r2<- BB 2131 mov r9, rINST, lsr #8 @ r9<- AA 2132 FETCH_B(r3, 1, 1) @ r3<- CC 2133 GET_VREG(r0, r2) @ r0<- vBB (array object) 2134 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2135 cmp r0, #0 @ null array object? 2136 beq common_errNullObject @ yes, bail 2137 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2138 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2139 cmp r1, r3 @ compare unsigned index, length 2140 bcs common_errArrayIndex @ index >= length, bail 2141 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2142 ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2143 GET_INST_OPCODE(ip) @ extract opcode from rINST 2144 SET_VREG(r2, r9) @ vAA<- r2 2145 GOTO_OPCODE(ip) @ jump to next instruction 2146 2147 2148 2149/* ------------------------------ */ 2150 .balign 64 2151.L_OP_AGET_BYTE: /* 0x48 */ 2152/* File: armv5te/OP_AGET_BYTE.S */ 2153/* File: armv5te/OP_AGET.S */ 2154 /* 2155 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2156 * 2157 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2158 * instructions. We use a pair of FETCH_Bs instead. 2159 * 2160 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2161 */ 2162 /* op vAA, vBB, vCC */ 2163 FETCH_B(r2, 1, 0) @ r2<- BB 2164 mov r9, rINST, lsr #8 @ r9<- AA 2165 FETCH_B(r3, 1, 1) @ r3<- CC 2166 GET_VREG(r0, r2) @ r0<- vBB (array object) 2167 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2168 cmp r0, #0 @ null array object? 2169 beq common_errNullObject @ yes, bail 2170 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2171 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2172 cmp r1, r3 @ compare unsigned index, length 2173 bcs common_errArrayIndex @ index >= length, bail 2174 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2175 ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2176 GET_INST_OPCODE(ip) @ extract opcode from rINST 2177 SET_VREG(r2, r9) @ vAA<- r2 2178 GOTO_OPCODE(ip) @ jump to next instruction 2179 2180 2181 2182/* ------------------------------ */ 2183 .balign 64 2184.L_OP_AGET_CHAR: /* 0x49 */ 2185/* File: armv5te/OP_AGET_CHAR.S */ 2186/* File: armv5te/OP_AGET.S */ 2187 /* 2188 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2189 * 2190 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2191 * instructions. We use a pair of FETCH_Bs instead. 
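 *
 * In rough C terms (a sketch; the element type shown is the one implied
 * by this variant's load instruction, e.g. unsigned short for aget-char):
 *
 *     if (obj == NULL) goto common_errNullObject;
 *     if ((unsigned)index >= obj->length) goto common_errArrayIndex;
 *     vAA = ((unsigned short *)obj->contents)[index];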
2192 * 2193 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2194 */ 2195 /* op vAA, vBB, vCC */ 2196 FETCH_B(r2, 1, 0) @ r2<- BB 2197 mov r9, rINST, lsr #8 @ r9<- AA 2198 FETCH_B(r3, 1, 1) @ r3<- CC 2199 GET_VREG(r0, r2) @ r0<- vBB (array object) 2200 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2201 cmp r0, #0 @ null array object? 2202 beq common_errNullObject @ yes, bail 2203 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2204 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2205 cmp r1, r3 @ compare unsigned index, length 2206 bcs common_errArrayIndex @ index >= length, bail 2207 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2208 ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2209 GET_INST_OPCODE(ip) @ extract opcode from rINST 2210 SET_VREG(r2, r9) @ vAA<- r2 2211 GOTO_OPCODE(ip) @ jump to next instruction 2212 2213 2214 2215/* ------------------------------ */ 2216 .balign 64 2217.L_OP_AGET_SHORT: /* 0x4a */ 2218/* File: armv5te/OP_AGET_SHORT.S */ 2219/* File: armv5te/OP_AGET.S */ 2220 /* 2221 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2222 * 2223 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2224 * instructions. We use a pair of FETCH_Bs instead. 2225 * 2226 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2227 */ 2228 /* op vAA, vBB, vCC */ 2229 FETCH_B(r2, 1, 0) @ r2<- BB 2230 mov r9, rINST, lsr #8 @ r9<- AA 2231 FETCH_B(r3, 1, 1) @ r3<- CC 2232 GET_VREG(r0, r2) @ r0<- vBB (array object) 2233 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2234 cmp r0, #0 @ null array object? 2235 beq common_errNullObject @ yes, bail 2236 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2237 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2238 cmp r1, r3 @ compare unsigned index, length 2239 bcs common_errArrayIndex @ index >= length, bail 2240 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2241 ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2242 GET_INST_OPCODE(ip) @ extract opcode from rINST 2243 SET_VREG(r2, r9) @ vAA<- r2 2244 GOTO_OPCODE(ip) @ jump to next instruction 2245 2246 2247 2248/* ------------------------------ */ 2249 .balign 64 2250.L_OP_APUT: /* 0x4b */ 2251/* File: armv5te/OP_APUT.S */ 2252 /* 2253 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2254 * 2255 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2256 * instructions. We use a pair of FETCH_Bs instead. 2257 * 2258 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2259 */ 2260 /* op vAA, vBB, vCC */ 2261 FETCH_B(r2, 1, 0) @ r2<- BB 2262 mov r9, rINST, lsr #8 @ r9<- AA 2263 FETCH_B(r3, 1, 1) @ r3<- CC 2264 GET_VREG(r0, r2) @ r0<- vBB (array object) 2265 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2266 cmp r0, #0 @ null array object? 2267 beq common_errNullObject @ yes, bail 2268 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2269 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2270 cmp r1, r3 @ compare unsigned index, length 2271 bcs common_errArrayIndex @ index >= length, bail 2272 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2273 GET_VREG(r2, r9) @ r2<- vAA 2274 GET_INST_OPCODE(ip) @ extract opcode from rINST 2275 str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2276 GOTO_OPCODE(ip) @ jump to next instruction 2277 2278 2279/* ------------------------------ */ 2280 .balign 64 2281.L_OP_APUT_WIDE: /* 0x4c */ 2282/* File: armv4t/OP_APUT_WIDE.S */ 2283 /* 2284 * Array put, 64 bits. vBB[vCC] <- vAA. 
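 *
 * Note: this is the armv4t override of the armv5te handler.  ARMv4T has
 * no STRD, so the 64-bit store in .LOP_APUT_WIDE_finish presumably uses
 * a two-register store instead (compare the ldmia/stmia pair visible in
 * the sput-wide handler further down).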
2285 */ 2286 /* aput-wide vAA, vBB, vCC */ 2287 FETCH(r0, 1) @ r0<- CCBB 2288 mov r9, rINST, lsr #8 @ r9<- AA 2289 and r2, r0, #255 @ r2<- BB 2290 mov r3, r0, lsr #8 @ r3<- CC 2291 GET_VREG(r0, r2) @ r0<- vBB (array object) 2292 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2293 cmp r0, #0 @ null array object? 2294 beq common_errNullObject @ yes, bail 2295 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2296 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2297 cmp r1, r3 @ compare unsigned index, length 2298 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2299 bcc .LOP_APUT_WIDE_finish @ okay, continue below 2300 b common_errArrayIndex @ index >= length, bail 2301 @ May want to swap the order of these two branches depending on how the 2302 @ branch prediction (if any) handles conditional forward branches vs. 2303 @ unconditional forward branches. 2304 2305/* ------------------------------ */ 2306 .balign 64 2307.L_OP_APUT_OBJECT: /* 0x4d */ 2308/* File: armv5te/OP_APUT_OBJECT.S */ 2309 /* 2310 * Store an object into an array. vBB[vCC] <- vAA. 2311 * 2312 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2313 * instructions. We use a pair of FETCH_Bs instead. 2314 */ 2315 /* op vAA, vBB, vCC */ 2316 FETCH(r0, 1) @ r0<- CCBB 2317 mov r9, rINST, lsr #8 @ r9<- AA 2318 and r2, r0, #255 @ r2<- BB 2319 mov r3, r0, lsr #8 @ r3<- CC 2320 GET_VREG(r1, r2) @ r1<- vBB (array object) 2321 GET_VREG(r0, r3) @ r0<- vCC (requested index) 2322 cmp r1, #0 @ null array object? 2323 GET_VREG(r9, r9) @ r9<- vAA 2324 beq common_errNullObject @ yes, bail 2325 ldr r3, [r1, #offArrayObject_length] @ r3<- arrayObj->length 2326 add r10, r1, r0, lsl #2 @ r10<- arrayObj + index*width 2327 cmp r0, r3 @ compare unsigned index, length 2328 bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on 2329 b common_errArrayIndex @ index >= length, bail 2330 2331 2332/* ------------------------------ */ 2333 .balign 64 2334.L_OP_APUT_BOOLEAN: /* 0x4e */ 2335/* File: armv5te/OP_APUT_BOOLEAN.S */ 2336/* File: armv5te/OP_APUT.S */ 2337 /* 2338 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2339 * 2340 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2341 * instructions. We use a pair of FETCH_Bs instead. 2342 * 2343 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2344 */ 2345 /* op vAA, vBB, vCC */ 2346 FETCH_B(r2, 1, 0) @ r2<- BB 2347 mov r9, rINST, lsr #8 @ r9<- AA 2348 FETCH_B(r3, 1, 1) @ r3<- CC 2349 GET_VREG(r0, r2) @ r0<- vBB (array object) 2350 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2351 cmp r0, #0 @ null array object? 2352 beq common_errNullObject @ yes, bail 2353 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2354 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2355 cmp r1, r3 @ compare unsigned index, length 2356 bcs common_errArrayIndex @ index >= length, bail 2357 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2358 GET_VREG(r2, r9) @ r2<- vAA 2359 GET_INST_OPCODE(ip) @ extract opcode from rINST 2360 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2361 GOTO_OPCODE(ip) @ jump to next instruction 2362 2363 2364 2365/* ------------------------------ */ 2366 .balign 64 2367.L_OP_APUT_BYTE: /* 0x4f */ 2368/* File: armv5te/OP_APUT_BYTE.S */ 2369/* File: armv5te/OP_APUT.S */ 2370 /* 2371 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2372 * 2373 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2374 * instructions. We use a pair of FETCH_Bs instead. 
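 *
 * Store-side counterpart of the aget sketch above (sketch only); the
 * width-sized store truncates, e.g. strb keeps only the low 8 bits of
 * vAA for aput-byte:
 *
 *     if (obj == NULL) goto common_errNullObject;
 *     if ((unsigned)index >= obj->length) goto common_errArrayIndex;
 *     ((unsigned char *)obj->contents)[index] = (unsigned char)vAA;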
2375 * 2376 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2377 */ 2378 /* op vAA, vBB, vCC */ 2379 FETCH_B(r2, 1, 0) @ r2<- BB 2380 mov r9, rINST, lsr #8 @ r9<- AA 2381 FETCH_B(r3, 1, 1) @ r3<- CC 2382 GET_VREG(r0, r2) @ r0<- vBB (array object) 2383 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2384 cmp r0, #0 @ null array object? 2385 beq common_errNullObject @ yes, bail 2386 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2387 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2388 cmp r1, r3 @ compare unsigned index, length 2389 bcs common_errArrayIndex @ index >= length, bail 2390 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2391 GET_VREG(r2, r9) @ r2<- vAA 2392 GET_INST_OPCODE(ip) @ extract opcode from rINST 2393 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2394 GOTO_OPCODE(ip) @ jump to next instruction 2395 2396 2397 2398/* ------------------------------ */ 2399 .balign 64 2400.L_OP_APUT_CHAR: /* 0x50 */ 2401/* File: armv5te/OP_APUT_CHAR.S */ 2402/* File: armv5te/OP_APUT.S */ 2403 /* 2404 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2405 * 2406 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2407 * instructions. We use a pair of FETCH_Bs instead. 2408 * 2409 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2410 */ 2411 /* op vAA, vBB, vCC */ 2412 FETCH_B(r2, 1, 0) @ r2<- BB 2413 mov r9, rINST, lsr #8 @ r9<- AA 2414 FETCH_B(r3, 1, 1) @ r3<- CC 2415 GET_VREG(r0, r2) @ r0<- vBB (array object) 2416 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2417 cmp r0, #0 @ null array object? 2418 beq common_errNullObject @ yes, bail 2419 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2420 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2421 cmp r1, r3 @ compare unsigned index, length 2422 bcs common_errArrayIndex @ index >= length, bail 2423 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2424 GET_VREG(r2, r9) @ r2<- vAA 2425 GET_INST_OPCODE(ip) @ extract opcode from rINST 2426 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2427 GOTO_OPCODE(ip) @ jump to next instruction 2428 2429 2430 2431/* ------------------------------ */ 2432 .balign 64 2433.L_OP_APUT_SHORT: /* 0x51 */ 2434/* File: armv5te/OP_APUT_SHORT.S */ 2435/* File: armv5te/OP_APUT.S */ 2436 /* 2437 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2438 * 2439 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2440 * instructions. We use a pair of FETCH_Bs instead. 2441 * 2442 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2443 */ 2444 /* op vAA, vBB, vCC */ 2445 FETCH_B(r2, 1, 0) @ r2<- BB 2446 mov r9, rINST, lsr #8 @ r9<- AA 2447 FETCH_B(r3, 1, 1) @ r3<- CC 2448 GET_VREG(r0, r2) @ r0<- vBB (array object) 2449 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2450 cmp r0, #0 @ null array object? 2451 beq common_errNullObject @ yes, bail 2452 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2453 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2454 cmp r1, r3 @ compare unsigned index, length 2455 bcs common_errArrayIndex @ index >= length, bail 2456 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2457 GET_VREG(r2, r9) @ r2<- vAA 2458 GET_INST_OPCODE(ip) @ extract opcode from rINST 2459 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2460 GOTO_OPCODE(ip) @ jump to next instruction 2461 2462 2463 2464/* ------------------------------ */ 2465 .balign 64 2466.L_OP_IGET: /* 0x52 */ 2467/* File: armv5te/OP_IGET.S */ 2468 /* 2469 * General 32-bit instance field get. 
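 *
 * Control flow, roughly (a sketch; the actual load is done by the
 * .LOP_IGET_finish fragment emitted elsewhere):
 *
 *     field = pDvmDex->pResFields[CCCC];              // fast path: cached
 *     if (field == NULL) {                            // slow path: resolve
 *         EXPORT_PC();                                // resolve may throw
 *         field = dvmResolveInstField(curMethod->clazz, CCCC);
 *         if (field == NULL) goto common_exceptionThrown;
 *     }
 *     // .LOP_IGET_finish then loads the 32-bit value out of the object
 *     // referenced by vB and stores it into vA.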
2470 * 2471 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2472 */ 2473 /* op vA, vB, field@CCCC */ 2474 mov r0, rINST, lsr #12 @ r0<- B 2475 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2476 FETCH(r1, 1) @ r1<- field ref CCCC 2477 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2478 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2479 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2480 cmp r0, #0 @ is resolved entry null? 2481 bne .LOP_IGET_finish @ no, already resolved 24828: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2483 EXPORT_PC() @ resolve() could throw 2484 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2485 bl dvmResolveInstField @ r0<- resolved InstField ptr 2486 cmp r0, #0 2487 bne .LOP_IGET_finish 2488 b common_exceptionThrown 2489 2490/* ------------------------------ */ 2491 .balign 64 2492.L_OP_IGET_WIDE: /* 0x53 */ 2493/* File: armv4t/OP_IGET_WIDE.S */ 2494 /* 2495 * Wide 32-bit instance field get. 2496 */ 2497 /* iget-wide vA, vB, field@CCCC */ 2498 mov r0, rINST, lsr #12 @ r0<- B 2499 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2500 FETCH(r1, 1) @ r1<- field ref CCCC 2501 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2502 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2503 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2504 cmp r0, #0 @ is resolved entry null? 2505 bne .LOP_IGET_WIDE_finish @ no, already resolved 25068: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2507 EXPORT_PC() @ resolve() could throw 2508 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2509 bl dvmResolveInstField @ r0<- resolved InstField ptr 2510 cmp r0, #0 2511 bne .LOP_IGET_WIDE_finish 2512 b common_exceptionThrown 2513 2514/* ------------------------------ */ 2515 .balign 64 2516.L_OP_IGET_OBJECT: /* 0x54 */ 2517/* File: armv5te/OP_IGET_OBJECT.S */ 2518/* File: armv5te/OP_IGET.S */ 2519 /* 2520 * General 32-bit instance field get. 2521 * 2522 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2523 */ 2524 /* op vA, vB, field@CCCC */ 2525 mov r0, rINST, lsr #12 @ r0<- B 2526 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2527 FETCH(r1, 1) @ r1<- field ref CCCC 2528 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2529 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2530 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2531 cmp r0, #0 @ is resolved entry null? 2532 bne .LOP_IGET_OBJECT_finish @ no, already resolved 25338: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2534 EXPORT_PC() @ resolve() could throw 2535 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2536 bl dvmResolveInstField @ r0<- resolved InstField ptr 2537 cmp r0, #0 2538 bne .LOP_IGET_OBJECT_finish 2539 b common_exceptionThrown 2540 2541 2542/* ------------------------------ */ 2543 .balign 64 2544.L_OP_IGET_BOOLEAN: /* 0x55 */ 2545/* File: armv5te/OP_IGET_BOOLEAN.S */ 2546@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" } 2547/* File: armv5te/OP_IGET.S */ 2548 /* 2549 * General 32-bit instance field get. 
2550 * 2551 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2552 */ 2553 /* op vA, vB, field@CCCC */ 2554 mov r0, rINST, lsr #12 @ r0<- B 2555 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2556 FETCH(r1, 1) @ r1<- field ref CCCC 2557 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2558 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2559 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2560 cmp r0, #0 @ is resolved entry null? 2561 bne .LOP_IGET_BOOLEAN_finish @ no, already resolved 25628: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2563 EXPORT_PC() @ resolve() could throw 2564 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2565 bl dvmResolveInstField @ r0<- resolved InstField ptr 2566 cmp r0, #0 2567 bne .LOP_IGET_BOOLEAN_finish 2568 b common_exceptionThrown 2569 2570 2571/* ------------------------------ */ 2572 .balign 64 2573.L_OP_IGET_BYTE: /* 0x56 */ 2574/* File: armv5te/OP_IGET_BYTE.S */ 2575@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" } 2576/* File: armv5te/OP_IGET.S */ 2577 /* 2578 * General 32-bit instance field get. 2579 * 2580 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2581 */ 2582 /* op vA, vB, field@CCCC */ 2583 mov r0, rINST, lsr #12 @ r0<- B 2584 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2585 FETCH(r1, 1) @ r1<- field ref CCCC 2586 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2587 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2588 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2589 cmp r0, #0 @ is resolved entry null? 2590 bne .LOP_IGET_BYTE_finish @ no, already resolved 25918: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2592 EXPORT_PC() @ resolve() could throw 2593 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2594 bl dvmResolveInstField @ r0<- resolved InstField ptr 2595 cmp r0, #0 2596 bne .LOP_IGET_BYTE_finish 2597 b common_exceptionThrown 2598 2599 2600/* ------------------------------ */ 2601 .balign 64 2602.L_OP_IGET_CHAR: /* 0x57 */ 2603/* File: armv5te/OP_IGET_CHAR.S */ 2604@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" } 2605/* File: armv5te/OP_IGET.S */ 2606 /* 2607 * General 32-bit instance field get. 2608 * 2609 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2610 */ 2611 /* op vA, vB, field@CCCC */ 2612 mov r0, rINST, lsr #12 @ r0<- B 2613 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2614 FETCH(r1, 1) @ r1<- field ref CCCC 2615 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2616 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2617 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2618 cmp r0, #0 @ is resolved entry null? 2619 bne .LOP_IGET_CHAR_finish @ no, already resolved 26208: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2621 EXPORT_PC() @ resolve() could throw 2622 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2623 bl dvmResolveInstField @ r0<- resolved InstField ptr 2624 cmp r0, #0 2625 bne .LOP_IGET_CHAR_finish 2626 b common_exceptionThrown 2627 2628 2629/* ------------------------------ */ 2630 .balign 64 2631.L_OP_IGET_SHORT: /* 0x58 */ 2632/* File: armv5te/OP_IGET_SHORT.S */ 2633@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" } 2634/* File: armv5te/OP_IGET.S */ 2635 /* 2636 * General 32-bit instance field get. 
2637 * 2638 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2639 */ 2640 /* op vA, vB, field@CCCC */ 2641 mov r0, rINST, lsr #12 @ r0<- B 2642 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2643 FETCH(r1, 1) @ r1<- field ref CCCC 2644 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2645 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2646 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2647 cmp r0, #0 @ is resolved entry null? 2648 bne .LOP_IGET_SHORT_finish @ no, already resolved 26498: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2650 EXPORT_PC() @ resolve() could throw 2651 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2652 bl dvmResolveInstField @ r0<- resolved InstField ptr 2653 cmp r0, #0 2654 bne .LOP_IGET_SHORT_finish 2655 b common_exceptionThrown 2656 2657 2658/* ------------------------------ */ 2659 .balign 64 2660.L_OP_IPUT: /* 0x59 */ 2661/* File: armv5te/OP_IPUT.S */ 2662 /* 2663 * General 32-bit instance field put. 2664 * 2665 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2666 */ 2667 /* op vA, vB, field@CCCC */ 2668 mov r0, rINST, lsr #12 @ r0<- B 2669 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2670 FETCH(r1, 1) @ r1<- field ref CCCC 2671 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2672 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2673 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2674 cmp r0, #0 @ is resolved entry null? 2675 bne .LOP_IPUT_finish @ no, already resolved 26768: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2677 EXPORT_PC() @ resolve() could throw 2678 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2679 bl dvmResolveInstField @ r0<- resolved InstField ptr 2680 cmp r0, #0 @ success? 2681 bne .LOP_IPUT_finish @ yes, finish up 2682 b common_exceptionThrown 2683 2684/* ------------------------------ */ 2685 .balign 64 2686.L_OP_IPUT_WIDE: /* 0x5a */ 2687/* File: armv4t/OP_IPUT_WIDE.S */ 2688 /* iput-wide vA, vB, field@CCCC */ 2689 mov r0, rINST, lsr #12 @ r0<- B 2690 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2691 FETCH(r1, 1) @ r1<- field ref CCCC 2692 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2693 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2694 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2695 cmp r0, #0 @ is resolved entry null? 2696 bne .LOP_IPUT_WIDE_finish @ no, already resolved 26978: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2698 EXPORT_PC() @ resolve() could throw 2699 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2700 bl dvmResolveInstField @ r0<- resolved InstField ptr 2701 cmp r0, #0 @ success? 2702 bne .LOP_IPUT_WIDE_finish @ yes, finish up 2703 b common_exceptionThrown 2704 2705/* ------------------------------ */ 2706 .balign 64 2707.L_OP_IPUT_OBJECT: /* 0x5b */ 2708/* File: armv5te/OP_IPUT_OBJECT.S */ 2709/* File: armv5te/OP_IPUT.S */ 2710 /* 2711 * General 32-bit instance field put. 2712 * 2713 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2714 */ 2715 /* op vA, vB, field@CCCC */ 2716 mov r0, rINST, lsr #12 @ r0<- B 2717 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2718 FETCH(r1, 1) @ r1<- field ref CCCC 2719 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2720 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2721 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2722 cmp r0, #0 @ is resolved entry null? 
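                                       @ (if null, fall through to "8:" below,
                                       @  resolve the field, then finish the
                                       @  store in .LOP_IPUT_OBJECT_finish)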
2723 bne .LOP_IPUT_OBJECT_finish @ no, already resolved 27248: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2725 EXPORT_PC() @ resolve() could throw 2726 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2727 bl dvmResolveInstField @ r0<- resolved InstField ptr 2728 cmp r0, #0 @ success? 2729 bne .LOP_IPUT_OBJECT_finish @ yes, finish up 2730 b common_exceptionThrown 2731 2732 2733/* ------------------------------ */ 2734 .balign 64 2735.L_OP_IPUT_BOOLEAN: /* 0x5c */ 2736/* File: armv5te/OP_IPUT_BOOLEAN.S */ 2737@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" } 2738/* File: armv5te/OP_IPUT.S */ 2739 /* 2740 * General 32-bit instance field put. 2741 * 2742 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2743 */ 2744 /* op vA, vB, field@CCCC */ 2745 mov r0, rINST, lsr #12 @ r0<- B 2746 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2747 FETCH(r1, 1) @ r1<- field ref CCCC 2748 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2749 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2750 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2751 cmp r0, #0 @ is resolved entry null? 2752 bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved 27538: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2754 EXPORT_PC() @ resolve() could throw 2755 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2756 bl dvmResolveInstField @ r0<- resolved InstField ptr 2757 cmp r0, #0 @ success? 2758 bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up 2759 b common_exceptionThrown 2760 2761 2762/* ------------------------------ */ 2763 .balign 64 2764.L_OP_IPUT_BYTE: /* 0x5d */ 2765/* File: armv5te/OP_IPUT_BYTE.S */ 2766@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" } 2767/* File: armv5te/OP_IPUT.S */ 2768 /* 2769 * General 32-bit instance field put. 2770 * 2771 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2772 */ 2773 /* op vA, vB, field@CCCC */ 2774 mov r0, rINST, lsr #12 @ r0<- B 2775 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2776 FETCH(r1, 1) @ r1<- field ref CCCC 2777 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2778 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2779 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2780 cmp r0, #0 @ is resolved entry null? 2781 bne .LOP_IPUT_BYTE_finish @ no, already resolved 27828: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2783 EXPORT_PC() @ resolve() could throw 2784 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2785 bl dvmResolveInstField @ r0<- resolved InstField ptr 2786 cmp r0, #0 @ success? 2787 bne .LOP_IPUT_BYTE_finish @ yes, finish up 2788 b common_exceptionThrown 2789 2790 2791/* ------------------------------ */ 2792 .balign 64 2793.L_OP_IPUT_CHAR: /* 0x5e */ 2794/* File: armv5te/OP_IPUT_CHAR.S */ 2795@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" } 2796/* File: armv5te/OP_IPUT.S */ 2797 /* 2798 * General 32-bit instance field put. 2799 * 2800 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2801 */ 2802 /* op vA, vB, field@CCCC */ 2803 mov r0, rINST, lsr #12 @ r0<- B 2804 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2805 FETCH(r1, 1) @ r1<- field ref CCCC 2806 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2807 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2808 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2809 cmp r0, #0 @ is resolved entry null? 
2810 bne .LOP_IPUT_CHAR_finish @ no, already resolved 28118: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2812 EXPORT_PC() @ resolve() could throw 2813 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2814 bl dvmResolveInstField @ r0<- resolved InstField ptr 2815 cmp r0, #0 @ success? 2816 bne .LOP_IPUT_CHAR_finish @ yes, finish up 2817 b common_exceptionThrown 2818 2819 2820/* ------------------------------ */ 2821 .balign 64 2822.L_OP_IPUT_SHORT: /* 0x5f */ 2823/* File: armv5te/OP_IPUT_SHORT.S */ 2824@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" } 2825/* File: armv5te/OP_IPUT.S */ 2826 /* 2827 * General 32-bit instance field put. 2828 * 2829 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2830 */ 2831 /* op vA, vB, field@CCCC */ 2832 mov r0, rINST, lsr #12 @ r0<- B 2833 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2834 FETCH(r1, 1) @ r1<- field ref CCCC 2835 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2836 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2837 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2838 cmp r0, #0 @ is resolved entry null? 2839 bne .LOP_IPUT_SHORT_finish @ no, already resolved 28408: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2841 EXPORT_PC() @ resolve() could throw 2842 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2843 bl dvmResolveInstField @ r0<- resolved InstField ptr 2844 cmp r0, #0 @ success? 2845 bne .LOP_IPUT_SHORT_finish @ yes, finish up 2846 b common_exceptionThrown 2847 2848 2849/* ------------------------------ */ 2850 .balign 64 2851.L_OP_SGET: /* 0x60 */ 2852/* File: armv5te/OP_SGET.S */ 2853 /* 2854 * General 32-bit SGET handler. 2855 * 2856 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2857 */ 2858 /* op vAA, field@BBBB */ 2859 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2860 FETCH(r1, 1) @ r1<- field ref BBBB 2861 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2862 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2863 cmp r0, #0 @ is resolved entry null? 2864 beq .LOP_SGET_resolve @ yes, do resolve 2865.LOP_SGET_finish: @ field ptr in r0 2866 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2867 mov r2, rINST, lsr #8 @ r2<- AA 2868 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2869 SET_VREG(r1, r2) @ fp[AA]<- r1 2870 GET_INST_OPCODE(ip) @ extract opcode from rINST 2871 GOTO_OPCODE(ip) @ jump to next instruction 2872 2873/* ------------------------------ */ 2874 .balign 64 2875.L_OP_SGET_WIDE: /* 0x61 */ 2876/* File: armv4t/OP_SGET_WIDE.S */ 2877 /* 2878 * 64-bit SGET handler. 2879 */ 2880 /* sget-wide vAA, field@BBBB */ 2881 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2882 FETCH(r1, 1) @ r1<- field ref BBBB 2883 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2884 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2885 cmp r0, #0 @ is resolved entry null? 
2886 beq .LOP_SGET_WIDE_resolve @ yes, do resolve 2887.LOP_SGET_WIDE_finish: 2888 mov r1, rINST, lsr #8 @ r1<- AA 2889 add r0, r0, #offStaticField_value 2890 ldmia r0, {r2-r3} @ r2/r3<- field value (aligned) 2891 add r1, rFP, r1, lsl #2 @ r1<- &fp[AA] 2892 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2893 stmia r1, {r2-r3} @ vAA/vAA+1<- r2/r3 2894 GET_INST_OPCODE(ip) @ extract opcode from rINST 2895 GOTO_OPCODE(ip) @ jump to next instruction 2896 2897/* ------------------------------ */ 2898 .balign 64 2899.L_OP_SGET_OBJECT: /* 0x62 */ 2900/* File: armv5te/OP_SGET_OBJECT.S */ 2901/* File: armv5te/OP_SGET.S */ 2902 /* 2903 * General 32-bit SGET handler. 2904 * 2905 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2906 */ 2907 /* op vAA, field@BBBB */ 2908 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2909 FETCH(r1, 1) @ r1<- field ref BBBB 2910 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2911 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2912 cmp r0, #0 @ is resolved entry null? 2913 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve 2914.LOP_SGET_OBJECT_finish: @ field ptr in r0 2915 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2916 mov r2, rINST, lsr #8 @ r2<- AA 2917 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2918 SET_VREG(r1, r2) @ fp[AA]<- r1 2919 GET_INST_OPCODE(ip) @ extract opcode from rINST 2920 GOTO_OPCODE(ip) @ jump to next instruction 2921 2922 2923/* ------------------------------ */ 2924 .balign 64 2925.L_OP_SGET_BOOLEAN: /* 0x63 */ 2926/* File: armv5te/OP_SGET_BOOLEAN.S */ 2927/* File: armv5te/OP_SGET.S */ 2928 /* 2929 * General 32-bit SGET handler. 2930 * 2931 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2932 */ 2933 /* op vAA, field@BBBB */ 2934 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2935 FETCH(r1, 1) @ r1<- field ref BBBB 2936 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2937 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2938 cmp r0, #0 @ is resolved entry null? 2939 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve 2940.LOP_SGET_BOOLEAN_finish: @ field ptr in r0 2941 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2942 mov r2, rINST, lsr #8 @ r2<- AA 2943 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2944 SET_VREG(r1, r2) @ fp[AA]<- r1 2945 GET_INST_OPCODE(ip) @ extract opcode from rINST 2946 GOTO_OPCODE(ip) @ jump to next instruction 2947 2948 2949/* ------------------------------ */ 2950 .balign 64 2951.L_OP_SGET_BYTE: /* 0x64 */ 2952/* File: armv5te/OP_SGET_BYTE.S */ 2953/* File: armv5te/OP_SGET.S */ 2954 /* 2955 * General 32-bit SGET handler. 2956 * 2957 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2958 */ 2959 /* op vAA, field@BBBB */ 2960 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2961 FETCH(r1, 1) @ r1<- field ref BBBB 2962 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2963 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2964 cmp r0, #0 @ is resolved entry null? 
2965 beq .LOP_SGET_BYTE_resolve @ yes, do resolve 2966.LOP_SGET_BYTE_finish: @ field ptr in r0 2967 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2968 mov r2, rINST, lsr #8 @ r2<- AA 2969 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2970 SET_VREG(r1, r2) @ fp[AA]<- r1 2971 GET_INST_OPCODE(ip) @ extract opcode from rINST 2972 GOTO_OPCODE(ip) @ jump to next instruction 2973 2974 2975/* ------------------------------ */ 2976 .balign 64 2977.L_OP_SGET_CHAR: /* 0x65 */ 2978/* File: armv5te/OP_SGET_CHAR.S */ 2979/* File: armv5te/OP_SGET.S */ 2980 /* 2981 * General 32-bit SGET handler. 2982 * 2983 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2984 */ 2985 /* op vAA, field@BBBB */ 2986 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2987 FETCH(r1, 1) @ r1<- field ref BBBB 2988 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2989 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2990 cmp r0, #0 @ is resolved entry null? 2991 beq .LOP_SGET_CHAR_resolve @ yes, do resolve 2992.LOP_SGET_CHAR_finish: @ field ptr in r0 2993 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2994 mov r2, rINST, lsr #8 @ r2<- AA 2995 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2996 SET_VREG(r1, r2) @ fp[AA]<- r1 2997 GET_INST_OPCODE(ip) @ extract opcode from rINST 2998 GOTO_OPCODE(ip) @ jump to next instruction 2999 3000 3001/* ------------------------------ */ 3002 .balign 64 3003.L_OP_SGET_SHORT: /* 0x66 */ 3004/* File: armv5te/OP_SGET_SHORT.S */ 3005/* File: armv5te/OP_SGET.S */ 3006 /* 3007 * General 32-bit SGET handler. 3008 * 3009 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 3010 */ 3011 /* op vAA, field@BBBB */ 3012 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3013 FETCH(r1, 1) @ r1<- field ref BBBB 3014 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3015 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3016 cmp r0, #0 @ is resolved entry null? 3017 beq .LOP_SGET_SHORT_resolve @ yes, do resolve 3018.LOP_SGET_SHORT_finish: @ field ptr in r0 3019 ldr r1, [r0, #offStaticField_value] @ r1<- field value 3020 mov r2, rINST, lsr #8 @ r2<- AA 3021 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3022 SET_VREG(r1, r2) @ fp[AA]<- r1 3023 GET_INST_OPCODE(ip) @ extract opcode from rINST 3024 GOTO_OPCODE(ip) @ jump to next instruction 3025 3026 3027/* ------------------------------ */ 3028 .balign 64 3029.L_OP_SPUT: /* 0x67 */ 3030/* File: armv5te/OP_SPUT.S */ 3031 /* 3032 * General 32-bit SPUT handler. 3033 * 3034 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3035 */ 3036 /* op vAA, field@BBBB */ 3037 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3038 FETCH(r1, 1) @ r1<- field ref BBBB 3039 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3040 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3041 cmp r0, #0 @ is resolved entry null? 3042 beq .LOP_SPUT_resolve @ yes, do resolve 3043.LOP_SPUT_finish: @ field ptr in r0 3044 mov r2, rINST, lsr #8 @ r2<- AA 3045 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3046 GET_VREG(r1, r2) @ r1<- fp[AA] 3047 GET_INST_OPCODE(ip) @ extract opcode from rINST 3048 str r1, [r0, #offStaticField_value] @ field<- vAA 3049 GOTO_OPCODE(ip) @ jump to next instruction 3050 3051/* ------------------------------ */ 3052 .balign 64 3053.L_OP_SPUT_WIDE: /* 0x68 */ 3054/* File: armv4t/OP_SPUT_WIDE.S */ 3055 /* 3056 * 64-bit SPUT handler. 
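 *
 * armv4t variant: the 64-bit value is copied into the field with an
 * ldmia/stmia register pair (r2/r3) rather than LDRD/STRD, which are
 * not available before ARMv5TE.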
3057 */ 3058 /* sput-wide vAA, field@BBBB */ 3059 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3060 FETCH(r1, 1) @ r1<- field ref BBBB 3061 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3062 mov r9, rINST, lsr #8 @ r9<- AA 3063 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3064 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 3065 cmp r0, #0 @ is resolved entry null? 3066 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve 3067.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9 3068 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3069 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 3070 GET_INST_OPCODE(ip) @ extract opcode from rINST 3071 add r0, r0, #offStaticField_value 3072 stmia r0, {r2-r3} @ field<- vAA/vAA+1 3073 GOTO_OPCODE(ip) @ jump to next instruction 3074 3075/* ------------------------------ */ 3076 .balign 64 3077.L_OP_SPUT_OBJECT: /* 0x69 */ 3078/* File: armv5te/OP_SPUT_OBJECT.S */ 3079/* File: armv5te/OP_SPUT.S */ 3080 /* 3081 * General 32-bit SPUT handler. 3082 * 3083 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3084 */ 3085 /* op vAA, field@BBBB */ 3086 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3087 FETCH(r1, 1) @ r1<- field ref BBBB 3088 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3089 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3090 cmp r0, #0 @ is resolved entry null? 3091 beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve 3092.LOP_SPUT_OBJECT_finish: @ field ptr in r0 3093 mov r2, rINST, lsr #8 @ r2<- AA 3094 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3095 GET_VREG(r1, r2) @ r1<- fp[AA] 3096 GET_INST_OPCODE(ip) @ extract opcode from rINST 3097 str r1, [r0, #offStaticField_value] @ field<- vAA 3098 GOTO_OPCODE(ip) @ jump to next instruction 3099 3100 3101/* ------------------------------ */ 3102 .balign 64 3103.L_OP_SPUT_BOOLEAN: /* 0x6a */ 3104/* File: armv5te/OP_SPUT_BOOLEAN.S */ 3105/* File: armv5te/OP_SPUT.S */ 3106 /* 3107 * General 32-bit SPUT handler. 3108 * 3109 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3110 */ 3111 /* op vAA, field@BBBB */ 3112 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3113 FETCH(r1, 1) @ r1<- field ref BBBB 3114 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3115 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3116 cmp r0, #0 @ is resolved entry null? 3117 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve 3118.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0 3119 mov r2, rINST, lsr #8 @ r2<- AA 3120 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3121 GET_VREG(r1, r2) @ r1<- fp[AA] 3122 GET_INST_OPCODE(ip) @ extract opcode from rINST 3123 str r1, [r0, #offStaticField_value] @ field<- vAA 3124 GOTO_OPCODE(ip) @ jump to next instruction 3125 3126 3127/* ------------------------------ */ 3128 .balign 64 3129.L_OP_SPUT_BYTE: /* 0x6b */ 3130/* File: armv5te/OP_SPUT_BYTE.S */ 3131/* File: armv5te/OP_SPUT.S */ 3132 /* 3133 * General 32-bit SPUT handler. 3134 * 3135 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3136 */ 3137 /* op vAA, field@BBBB */ 3138 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3139 FETCH(r1, 1) @ r1<- field ref BBBB 3140 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3141 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3142 cmp r0, #0 @ is resolved entry null? 
3143 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve 3144.LOP_SPUT_BYTE_finish: @ field ptr in r0 3145 mov r2, rINST, lsr #8 @ r2<- AA 3146 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3147 GET_VREG(r1, r2) @ r1<- fp[AA] 3148 GET_INST_OPCODE(ip) @ extract opcode from rINST 3149 str r1, [r0, #offStaticField_value] @ field<- vAA 3150 GOTO_OPCODE(ip) @ jump to next instruction 3151 3152 3153/* ------------------------------ */ 3154 .balign 64 3155.L_OP_SPUT_CHAR: /* 0x6c */ 3156/* File: armv5te/OP_SPUT_CHAR.S */ 3157/* File: armv5te/OP_SPUT.S */ 3158 /* 3159 * General 32-bit SPUT handler. 3160 * 3161 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3162 */ 3163 /* op vAA, field@BBBB */ 3164 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3165 FETCH(r1, 1) @ r1<- field ref BBBB 3166 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3167 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3168 cmp r0, #0 @ is resolved entry null? 3169 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve 3170.LOP_SPUT_CHAR_finish: @ field ptr in r0 3171 mov r2, rINST, lsr #8 @ r2<- AA 3172 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3173 GET_VREG(r1, r2) @ r1<- fp[AA] 3174 GET_INST_OPCODE(ip) @ extract opcode from rINST 3175 str r1, [r0, #offStaticField_value] @ field<- vAA 3176 GOTO_OPCODE(ip) @ jump to next instruction 3177 3178 3179/* ------------------------------ */ 3180 .balign 64 3181.L_OP_SPUT_SHORT: /* 0x6d */ 3182/* File: armv5te/OP_SPUT_SHORT.S */ 3183/* File: armv5te/OP_SPUT.S */ 3184 /* 3185 * General 32-bit SPUT handler. 3186 * 3187 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3188 */ 3189 /* op vAA, field@BBBB */ 3190 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3191 FETCH(r1, 1) @ r1<- field ref BBBB 3192 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3193 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3194 cmp r0, #0 @ is resolved entry null? 3195 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve 3196.LOP_SPUT_SHORT_finish: @ field ptr in r0 3197 mov r2, rINST, lsr #8 @ r2<- AA 3198 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3199 GET_VREG(r1, r2) @ r1<- fp[AA] 3200 GET_INST_OPCODE(ip) @ extract opcode from rINST 3201 str r1, [r0, #offStaticField_value] @ field<- vAA 3202 GOTO_OPCODE(ip) @ jump to next instruction 3203 3204 3205/* ------------------------------ */ 3206 .balign 64 3207.L_OP_INVOKE_VIRTUAL: /* 0x6e */ 3208/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3209 /* 3210 * Handle a virtual method call. 3211 * 3212 * for: invoke-virtual, invoke-virtual/range 3213 */ 3214 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3215 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3216 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3217 FETCH(r1, 1) @ r1<- BBBB 3218 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3219 FETCH(r10, 2) @ r10<- GFED or CCCC 3220 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3221 .if (!0) 3222 and r10, r10, #15 @ r10<- D (or stays CCCC) 3223 .endif 3224 cmp r0, #0 @ already resolved? 3225 EXPORT_PC() @ must export for invoke 3226 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on 3227 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3228 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3229 mov r2, #METHOD_VIRTUAL @ resolver method type 3230 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3231 cmp r0, #0 @ got null? 
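                                       @ (a null result here presumably means
                                       @  dvmResolveMethod left an exception
                                       @  pending for common_exceptionThrown)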
3232 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue 3233 b common_exceptionThrown @ yes, handle exception 3234 3235/* ------------------------------ */ 3236 .balign 64 3237.L_OP_INVOKE_SUPER: /* 0x6f */ 3238/* File: armv5te/OP_INVOKE_SUPER.S */ 3239 /* 3240 * Handle a "super" method call. 3241 * 3242 * for: invoke-super, invoke-super/range 3243 */ 3244 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3245 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3246 FETCH(r10, 2) @ r10<- GFED or CCCC 3247 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3248 .if (!0) 3249 and r10, r10, #15 @ r10<- D (or stays CCCC) 3250 .endif 3251 FETCH(r1, 1) @ r1<- BBBB 3252 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3253 GET_VREG(r2, r10) @ r2<- "this" ptr 3254 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3255 cmp r2, #0 @ null "this"? 3256 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3257 beq common_errNullObject @ null "this", throw exception 3258 cmp r0, #0 @ already resolved? 3259 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3260 EXPORT_PC() @ must export for invoke 3261 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on 3262 b .LOP_INVOKE_SUPER_resolve @ do resolve now 3263 3264/* ------------------------------ */ 3265 .balign 64 3266.L_OP_INVOKE_DIRECT: /* 0x70 */ 3267/* File: armv5te/OP_INVOKE_DIRECT.S */ 3268 /* 3269 * Handle a direct method call. 3270 * 3271 * (We could defer the "is 'this' pointer null" test to the common 3272 * method invocation code, and use a flag to indicate that static 3273 * calls don't count. If we do this as part of copying the arguments 3274 * out we could avoiding loading the first arg twice.) 3275 * 3276 * for: invoke-direct, invoke-direct/range 3277 */ 3278 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3279 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3280 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3281 FETCH(r1, 1) @ r1<- BBBB 3282 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3283 FETCH(r10, 2) @ r10<- GFED or CCCC 3284 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3285 .if (!0) 3286 and r10, r10, #15 @ r10<- D (or stays CCCC) 3287 .endif 3288 cmp r0, #0 @ already resolved? 3289 EXPORT_PC() @ must export for invoke 3290 GET_VREG(r2, r10) @ r2<- "this" ptr 3291 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now 3292.LOP_INVOKE_DIRECT_finish: 3293 cmp r2, #0 @ null "this" ref? 3294 bne common_invokeMethodNoRange @ no, continue on 3295 b common_errNullObject @ yes, throw exception 3296 3297/* ------------------------------ */ 3298 .balign 64 3299.L_OP_INVOKE_STATIC: /* 0x71 */ 3300/* File: armv5te/OP_INVOKE_STATIC.S */ 3301 /* 3302 * Handle a static method call. 3303 * 3304 * for: invoke-static, invoke-static/range 3305 */ 3306 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3307 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3308 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3309 FETCH(r1, 1) @ r1<- BBBB 3310 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3311 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3312 cmp r0, #0 @ already resolved? 3313 EXPORT_PC() @ must export for invoke 3314 bne common_invokeMethodNoRange @ yes, continue on 33150: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3316 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3317 mov r2, #METHOD_STATIC @ resolver method type 3318 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3319 cmp r0, #0 @ got null? 
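    @ Note: invoke-static has no receiver, so unlike invoke-direct above
    @ there is no "this" null check here; once the method is resolved the
    @ code branches straight to the common invoke handler with r0 holding
    @ the Method* to call.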
3320 bne common_invokeMethodNoRange @ no, continue 3321 b common_exceptionThrown @ yes, handle exception 3322 3323 3324/* ------------------------------ */ 3325 .balign 64 3326.L_OP_INVOKE_INTERFACE: /* 0x72 */ 3327/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3328 /* 3329 * Handle an interface method call. 3330 * 3331 * for: invoke-interface, invoke-interface/range 3332 */ 3333 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3334 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3335 FETCH(r2, 2) @ r2<- FEDC or CCCC 3336 FETCH(r1, 1) @ r1<- BBBB 3337 .if (!0) 3338 and r2, r2, #15 @ r2<- C (or stays CCCC) 3339 .endif 3340 EXPORT_PC() @ must export for invoke 3341 GET_VREG(r0, r2) @ r0<- first arg ("this") 3342 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3343 cmp r0, #0 @ null obj? 3344 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3345 beq common_errNullObject @ yes, fail 3346 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3347 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3348 cmp r0, #0 @ failed? 3349 beq common_exceptionThrown @ yes, handle exception 3350 b common_invokeMethodNoRange @ jump to common handler 3351 3352 3353/* ------------------------------ */ 3354 .balign 64 3355.L_OP_UNUSED_73: /* 0x73 */ 3356/* File: armv5te/OP_UNUSED_73.S */ 3357/* File: armv5te/unused.S */ 3358 bl common_abort 3359 3360 3361 3362/* ------------------------------ */ 3363 .balign 64 3364.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */ 3365/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */ 3366/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3367 /* 3368 * Handle a virtual method call. 3369 * 3370 * for: invoke-virtual, invoke-virtual/range 3371 */ 3372 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3373 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3374 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3375 FETCH(r1, 1) @ r1<- BBBB 3376 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3377 FETCH(r10, 2) @ r10<- GFED or CCCC 3378 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3379 .if (!1) 3380 and r10, r10, #15 @ r10<- D (or stays CCCC) 3381 .endif 3382 cmp r0, #0 @ already resolved? 3383 EXPORT_PC() @ must export for invoke 3384 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on 3385 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3386 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3387 mov r2, #METHOD_VIRTUAL @ resolver method type 3388 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3389 cmp r0, #0 @ got null? 3390 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue 3391 b common_exceptionThrown @ yes, handle exception 3392 3393 3394/* ------------------------------ */ 3395 .balign 64 3396.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */ 3397/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */ 3398/* File: armv5te/OP_INVOKE_SUPER.S */ 3399 /* 3400 * Handle a "super" method call. 3401 * 3402 * for: invoke-super, invoke-super/range 3403 */ 3404 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3405 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3406 FETCH(r10, 2) @ r10<- GFED or CCCC 3407 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3408 .if (!1) 3409 and r10, r10, #15 @ r10<- D (or stays CCCC) 3410 .endif 3411 FETCH(r1, 1) @ r1<- BBBB 3412 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3413 GET_VREG(r2, r10) @ r2<- "this" ptr 3414 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3415 cmp r2, #0 @ null "this"? 
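    /*
     * The ".if (!0)" / ".if (!1)" blocks in these invoke handlers are the
     * expansion of the template's "isrange" flag: ".if (!0)" assembles the
     * "and rN, rN, #15" that extracts the first argument register from the
     * packed nibbles of a non-range invoke, while ".if (!1)" (as in this
     * /range form) drops it, because the fetched value is already the full
     * 16-bit CCCC base register index.
     */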
3416 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3417 beq common_errNullObject @ null "this", throw exception 3418 cmp r0, #0 @ already resolved? 3419 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3420 EXPORT_PC() @ must export for invoke 3421 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on 3422 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now 3423 3424 3425/* ------------------------------ */ 3426 .balign 64 3427.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */ 3428/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */ 3429/* File: armv5te/OP_INVOKE_DIRECT.S */ 3430 /* 3431 * Handle a direct method call. 3432 * 3433 * (We could defer the "is 'this' pointer null" test to the common 3434 * method invocation code, and use a flag to indicate that static 3435 * calls don't count. If we do this as part of copying the arguments 3436 * out we could avoiding loading the first arg twice.) 3437 * 3438 * for: invoke-direct, invoke-direct/range 3439 */ 3440 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3441 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3442 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3443 FETCH(r1, 1) @ r1<- BBBB 3444 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3445 FETCH(r10, 2) @ r10<- GFED or CCCC 3446 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3447 .if (!1) 3448 and r10, r10, #15 @ r10<- D (or stays CCCC) 3449 .endif 3450 cmp r0, #0 @ already resolved? 3451 EXPORT_PC() @ must export for invoke 3452 GET_VREG(r2, r10) @ r2<- "this" ptr 3453 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now 3454.LOP_INVOKE_DIRECT_RANGE_finish: 3455 cmp r2, #0 @ null "this" ref? 3456 bne common_invokeMethodRange @ no, continue on 3457 b common_errNullObject @ yes, throw exception 3458 3459 3460/* ------------------------------ */ 3461 .balign 64 3462.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */ 3463/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */ 3464/* File: armv5te/OP_INVOKE_STATIC.S */ 3465 /* 3466 * Handle a static method call. 3467 * 3468 * for: invoke-static, invoke-static/range 3469 */ 3470 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3471 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3472 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3473 FETCH(r1, 1) @ r1<- BBBB 3474 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3475 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3476 cmp r0, #0 @ already resolved? 3477 EXPORT_PC() @ must export for invoke 3478 bne common_invokeMethodRange @ yes, continue on 34790: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3480 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3481 mov r2, #METHOD_STATIC @ resolver method type 3482 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3483 cmp r0, #0 @ got null? 3484 bne common_invokeMethodRange @ no, continue 3485 b common_exceptionThrown @ yes, handle exception 3486 3487 3488 3489/* ------------------------------ */ 3490 .balign 64 3491.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */ 3492/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */ 3493/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3494 /* 3495 * Handle an interface method call. 
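     *
     * (Interface calls cannot be dispatched through a vtable slot chosen
     * at compile time, so the code below hands the receiver's actual
     * class, the method index, the calling method and the DEX to
     * dvmFindInterfaceMethodInCache; the result is cached, and a NULL
     * return means the helper has already raised the exception.)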
3496 * 3497 * for: invoke-interface, invoke-interface/range 3498 */ 3499 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3500 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3501 FETCH(r2, 2) @ r2<- FEDC or CCCC 3502 FETCH(r1, 1) @ r1<- BBBB 3503 .if (!1) 3504 and r2, r2, #15 @ r2<- C (or stays CCCC) 3505 .endif 3506 EXPORT_PC() @ must export for invoke 3507 GET_VREG(r0, r2) @ r0<- first arg ("this") 3508 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3509 cmp r0, #0 @ null obj? 3510 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3511 beq common_errNullObject @ yes, fail 3512 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3513 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3514 cmp r0, #0 @ failed? 3515 beq common_exceptionThrown @ yes, handle exception 3516 b common_invokeMethodRange @ jump to common handler 3517 3518 3519 3520/* ------------------------------ */ 3521 .balign 64 3522.L_OP_UNUSED_79: /* 0x79 */ 3523/* File: armv5te/OP_UNUSED_79.S */ 3524/* File: armv5te/unused.S */ 3525 bl common_abort 3526 3527 3528 3529/* ------------------------------ */ 3530 .balign 64 3531.L_OP_UNUSED_7A: /* 0x7a */ 3532/* File: armv5te/OP_UNUSED_7A.S */ 3533/* File: armv5te/unused.S */ 3534 bl common_abort 3535 3536 3537 3538/* ------------------------------ */ 3539 .balign 64 3540.L_OP_NEG_INT: /* 0x7b */ 3541/* File: armv5te/OP_NEG_INT.S */ 3542/* File: armv5te/unop.S */ 3543 /* 3544 * Generic 32-bit unary operation. Provide an "instr" line that 3545 * specifies an instruction that performs "result = op r0". 3546 * This could be an ARM instruction or a function call. 3547 * 3548 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3549 * int-to-byte, int-to-char, int-to-short 3550 */ 3551 /* unop vA, vB */ 3552 mov r3, rINST, lsr #12 @ r3<- B 3553 mov r9, rINST, lsr #8 @ r9<- A+ 3554 GET_VREG(r0, r3) @ r0<- vB 3555 and r9, r9, #15 3556 @ optional op; may set condition codes 3557 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3558 rsb r0, r0, #0 @ r0<- op, r0-r3 changed 3559 GET_INST_OPCODE(ip) @ extract opcode from rINST 3560 SET_VREG(r0, r9) @ vAA<- r0 3561 GOTO_OPCODE(ip) @ jump to next instruction 3562 /* 9-10 instructions */ 3563 3564 3565/* ------------------------------ */ 3566 .balign 64 3567.L_OP_NOT_INT: /* 0x7c */ 3568/* File: armv5te/OP_NOT_INT.S */ 3569/* File: armv5te/unop.S */ 3570 /* 3571 * Generic 32-bit unary operation. Provide an "instr" line that 3572 * specifies an instruction that performs "result = op r0". 3573 * This could be an ARM instruction or a function call. 3574 * 3575 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3576 * int-to-byte, int-to-char, int-to-short 3577 */ 3578 /* unop vA, vB */ 3579 mov r3, rINST, lsr #12 @ r3<- B 3580 mov r9, rINST, lsr #8 @ r9<- A+ 3581 GET_VREG(r0, r3) @ r0<- vB 3582 and r9, r9, #15 3583 @ optional op; may set condition codes 3584 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3585 mvn r0, r0 @ r0<- op, r0-r3 changed 3586 GET_INST_OPCODE(ip) @ extract opcode from rINST 3587 SET_VREG(r0, r9) @ vAA<- r0 3588 GOTO_OPCODE(ip) @ jump to next instruction 3589 /* 9-10 instructions */ 3590 3591 3592/* ------------------------------ */ 3593 .balign 64 3594.L_OP_NEG_LONG: /* 0x7d */ 3595/* File: armv5te/OP_NEG_LONG.S */ 3596/* File: armv5te/unopWide.S */ 3597 /* 3598 * Generic 64-bit unary operation. Provide an "instr" line that 3599 * specifies an instruction that performs "result = op r0/r1". 3600 * This could be an ARM instruction or a function call. 
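     *
     * Illustrative C sketch of one expansion (neg-long; fp[] is the Dalvik
     * register file, s8 a signed 64-bit value -- names simplified):
     *
     *     s8 vB;  memcpy(&vB, &fp[B], 8);    // ldmia r3, {r0-r1}
     *     s8 res = -vB;                      // the rsbs/rsc pair below
     *     memcpy(&fp[A], &res, 8);           // stmia r9, {r0-r1}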
3601 * 3602 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3603 */ 3604 /* unop vA, vB */ 3605 mov r9, rINST, lsr #8 @ r9<- A+ 3606 mov r3, rINST, lsr #12 @ r3<- B 3607 and r9, r9, #15 3608 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3609 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3610 ldmia r3, {r0-r1} @ r0/r1<- vAA 3611 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3612 rsbs r0, r0, #0 @ optional op; may set condition codes 3613 rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed 3614 GET_INST_OPCODE(ip) @ extract opcode from rINST 3615 stmia r9, {r0-r1} @ vAA<- r0/r1 3616 GOTO_OPCODE(ip) @ jump to next instruction 3617 /* 12-13 instructions */ 3618 3619 3620 3621/* ------------------------------ */ 3622 .balign 64 3623.L_OP_NOT_LONG: /* 0x7e */ 3624/* File: armv5te/OP_NOT_LONG.S */ 3625/* File: armv5te/unopWide.S */ 3626 /* 3627 * Generic 64-bit unary operation. Provide an "instr" line that 3628 * specifies an instruction that performs "result = op r0/r1". 3629 * This could be an ARM instruction or a function call. 3630 * 3631 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3632 */ 3633 /* unop vA, vB */ 3634 mov r9, rINST, lsr #8 @ r9<- A+ 3635 mov r3, rINST, lsr #12 @ r3<- B 3636 and r9, r9, #15 3637 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3638 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3639 ldmia r3, {r0-r1} @ r0/r1<- vAA 3640 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3641 mvn r0, r0 @ optional op; may set condition codes 3642 mvn r1, r1 @ r0/r1<- op, r2-r3 changed 3643 GET_INST_OPCODE(ip) @ extract opcode from rINST 3644 stmia r9, {r0-r1} @ vAA<- r0/r1 3645 GOTO_OPCODE(ip) @ jump to next instruction 3646 /* 12-13 instructions */ 3647 3648 3649 3650/* ------------------------------ */ 3651 .balign 64 3652.L_OP_NEG_FLOAT: /* 0x7f */ 3653/* File: armv5te/OP_NEG_FLOAT.S */ 3654/* File: armv5te/unop.S */ 3655 /* 3656 * Generic 32-bit unary operation. Provide an "instr" line that 3657 * specifies an instruction that performs "result = op r0". 3658 * This could be an ARM instruction or a function call. 3659 * 3660 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3661 * int-to-byte, int-to-char, int-to-short 3662 */ 3663 /* unop vA, vB */ 3664 mov r3, rINST, lsr #12 @ r3<- B 3665 mov r9, rINST, lsr #8 @ r9<- A+ 3666 GET_VREG(r0, r3) @ r0<- vB 3667 and r9, r9, #15 3668 @ optional op; may set condition codes 3669 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3670 add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed 3671 GET_INST_OPCODE(ip) @ extract opcode from rINST 3672 SET_VREG(r0, r9) @ vAA<- r0 3673 GOTO_OPCODE(ip) @ jump to next instruction 3674 /* 9-10 instructions */ 3675 3676 3677/* ------------------------------ */ 3678 .balign 64 3679.L_OP_NEG_DOUBLE: /* 0x80 */ 3680/* File: armv5te/OP_NEG_DOUBLE.S */ 3681/* File: armv5te/unopWide.S */ 3682 /* 3683 * Generic 64-bit unary operation. Provide an "instr" line that 3684 * specifies an instruction that performs "result = op r0/r1". 3685 * This could be an ARM instruction or a function call. 
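     *
     * neg-double below needs no floating-point helper at all: the
     * "add r1, r1, #0x80000000" flips bit 31 of the high word (the carry
     * out of the top bit is discarded, so add and eor are equivalent
     * here), i.e. it just toggles the IEEE-754 sign bit, the same trick
     * neg-float used on its single 32-bit word above.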
3686 * 3687 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3688 */ 3689 /* unop vA, vB */ 3690 mov r9, rINST, lsr #8 @ r9<- A+ 3691 mov r3, rINST, lsr #12 @ r3<- B 3692 and r9, r9, #15 3693 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3694 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3695 ldmia r3, {r0-r1} @ r0/r1<- vAA 3696 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3697 @ optional op; may set condition codes 3698 add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed 3699 GET_INST_OPCODE(ip) @ extract opcode from rINST 3700 stmia r9, {r0-r1} @ vAA<- r0/r1 3701 GOTO_OPCODE(ip) @ jump to next instruction 3702 /* 12-13 instructions */ 3703 3704 3705 3706/* ------------------------------ */ 3707 .balign 64 3708.L_OP_INT_TO_LONG: /* 0x81 */ 3709/* File: armv5te/OP_INT_TO_LONG.S */ 3710/* File: armv5te/unopWider.S */ 3711 /* 3712 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3713 * that specifies an instruction that performs "result = op r0", where 3714 * "result" is a 64-bit quantity in r0/r1. 3715 * 3716 * For: int-to-long, int-to-double, float-to-long, float-to-double 3717 */ 3718 /* unop vA, vB */ 3719 mov r9, rINST, lsr #8 @ r9<- A+ 3720 mov r3, rINST, lsr #12 @ r3<- B 3721 and r9, r9, #15 3722 GET_VREG(r0, r3) @ r0<- vB 3723 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3724 @ optional op; may set condition codes 3725 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3726 mov r1, r0, asr #31 @ r0<- op, r0-r3 changed 3727 GET_INST_OPCODE(ip) @ extract opcode from rINST 3728 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3729 GOTO_OPCODE(ip) @ jump to next instruction 3730 /* 10-11 instructions */ 3731 3732 3733/* ------------------------------ */ 3734 .balign 64 3735.L_OP_INT_TO_FLOAT: /* 0x82 */ 3736/* File: armv5te/OP_INT_TO_FLOAT.S */ 3737/* File: armv5te/unop.S */ 3738 /* 3739 * Generic 32-bit unary operation. Provide an "instr" line that 3740 * specifies an instruction that performs "result = op r0". 3741 * This could be an ARM instruction or a function call. 3742 * 3743 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3744 * int-to-byte, int-to-char, int-to-short 3745 */ 3746 /* unop vA, vB */ 3747 mov r3, rINST, lsr #12 @ r3<- B 3748 mov r9, rINST, lsr #8 @ r9<- A+ 3749 GET_VREG(r0, r3) @ r0<- vB 3750 and r9, r9, #15 3751 @ optional op; may set condition codes 3752 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3753 bl __aeabi_i2f @ r0<- op, r0-r3 changed 3754 GET_INST_OPCODE(ip) @ extract opcode from rINST 3755 SET_VREG(r0, r9) @ vAA<- r0 3756 GOTO_OPCODE(ip) @ jump to next instruction 3757 /* 9-10 instructions */ 3758 3759 3760/* ------------------------------ */ 3761 .balign 64 3762.L_OP_INT_TO_DOUBLE: /* 0x83 */ 3763/* File: armv5te/OP_INT_TO_DOUBLE.S */ 3764/* File: armv5te/unopWider.S */ 3765 /* 3766 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3767 * that specifies an instruction that performs "result = op r0", where 3768 * "result" is a 64-bit quantity in r0/r1. 
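     *
     * There is no VFP on this target ('armv4t'), so the floating-point
     * conversions here are done in software via the AEABI helpers
     * (__aeabi_i2f, __aeabi_i2d, __aeabi_f2d and friends): the argument is
     * passed in r0 (or r0/r1) and the result comes back in r0 for a float
     * or r0/r1 for a double, which is the register pair stmia writes into
     * fp[A] below.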
3769 * 3770 * For: int-to-long, int-to-double, float-to-long, float-to-double 3771 */ 3772 /* unop vA, vB */ 3773 mov r9, rINST, lsr #8 @ r9<- A+ 3774 mov r3, rINST, lsr #12 @ r3<- B 3775 and r9, r9, #15 3776 GET_VREG(r0, r3) @ r0<- vB 3777 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3778 @ optional op; may set condition codes 3779 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3780 bl __aeabi_i2d @ r0<- op, r0-r3 changed 3781 GET_INST_OPCODE(ip) @ extract opcode from rINST 3782 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3783 GOTO_OPCODE(ip) @ jump to next instruction 3784 /* 10-11 instructions */ 3785 3786 3787/* ------------------------------ */ 3788 .balign 64 3789.L_OP_LONG_TO_INT: /* 0x84 */ 3790/* File: armv5te/OP_LONG_TO_INT.S */ 3791/* we ignore the high word, making this equivalent to a 32-bit reg move */ 3792/* File: armv5te/OP_MOVE.S */ 3793 /* for move, move-object, long-to-int */ 3794 /* op vA, vB */ 3795 mov r1, rINST, lsr #12 @ r1<- B from 15:12 3796 mov r0, rINST, lsr #8 @ r0<- A from 11:8 3797 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3798 GET_VREG(r2, r1) @ r2<- fp[B] 3799 and r0, r0, #15 3800 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 3801 SET_VREG(r2, r0) @ fp[A]<- r2 3802 GOTO_OPCODE(ip) @ execute next instruction 3803 3804 3805 3806/* ------------------------------ */ 3807 .balign 64 3808.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3809/* File: armv5te/OP_LONG_TO_FLOAT.S */ 3810/* File: armv5te/unopNarrower.S */ 3811 /* 3812 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3813 * that specifies an instruction that performs "result = op r0/r1", where 3814 * "result" is a 32-bit quantity in r0. 3815 * 3816 * For: long-to-float, double-to-int, double-to-float 3817 * 3818 * (This would work for long-to-int, but that instruction is actually 3819 * an exact match for OP_MOVE.) 3820 */ 3821 /* unop vA, vB */ 3822 mov r3, rINST, lsr #12 @ r3<- B 3823 mov r9, rINST, lsr #8 @ r9<- A+ 3824 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3825 and r9, r9, #15 3826 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3827 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3828 @ optional op; may set condition codes 3829 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3830 GET_INST_OPCODE(ip) @ extract opcode from rINST 3831 SET_VREG(r0, r9) @ vA<- r0 3832 GOTO_OPCODE(ip) @ jump to next instruction 3833 /* 10-11 instructions */ 3834 3835 3836/* ------------------------------ */ 3837 .balign 64 3838.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3839/* File: armv5te/OP_LONG_TO_DOUBLE.S */ 3840/* File: armv5te/unopWide.S */ 3841 /* 3842 * Generic 64-bit unary operation. Provide an "instr" line that 3843 * specifies an instruction that performs "result = op r0/r1". 3844 * This could be an ARM instruction or a function call. 
3845 * 3846 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3847 */ 3848 /* unop vA, vB */ 3849 mov r9, rINST, lsr #8 @ r9<- A+ 3850 mov r3, rINST, lsr #12 @ r3<- B 3851 and r9, r9, #15 3852 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3853 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3854 ldmia r3, {r0-r1} @ r0/r1<- vAA 3855 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3856 @ optional op; may set condition codes 3857 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3858 GET_INST_OPCODE(ip) @ extract opcode from rINST 3859 stmia r9, {r0-r1} @ vAA<- r0/r1 3860 GOTO_OPCODE(ip) @ jump to next instruction 3861 /* 12-13 instructions */ 3862 3863 3864 3865/* ------------------------------ */ 3866 .balign 64 3867.L_OP_FLOAT_TO_INT: /* 0x87 */ 3868/* File: armv5te/OP_FLOAT_TO_INT.S */ 3869/* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3870/* File: armv5te/unop.S */ 3871 /* 3872 * Generic 32-bit unary operation. Provide an "instr" line that 3873 * specifies an instruction that performs "result = op r0". 3874 * This could be an ARM instruction or a function call. 3875 * 3876 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3877 * int-to-byte, int-to-char, int-to-short 3878 */ 3879 /* unop vA, vB */ 3880 mov r3, rINST, lsr #12 @ r3<- B 3881 mov r9, rINST, lsr #8 @ r9<- A+ 3882 GET_VREG(r0, r3) @ r0<- vB 3883 and r9, r9, #15 3884 @ optional op; may set condition codes 3885 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3886 bl __aeabi_f2iz @ r0<- op, r0-r3 changed 3887 GET_INST_OPCODE(ip) @ extract opcode from rINST 3888 SET_VREG(r0, r9) @ vAA<- r0 3889 GOTO_OPCODE(ip) @ jump to next instruction 3890 /* 9-10 instructions */ 3891 3892 3893#if 0 3894@include "armv5te/unop.S" {"instr":"bl f2i_doconv"} 3895@break 3896/* 3897 * Convert the float in r0 to an int in r0. 3898 * 3899 * We have to clip values to int min/max per the specification. The 3900 * expected common case is a "reasonable" value that converts directly 3901 * to modest integer. The EABI convert function isn't doing this for us. 3902 */ 3903f2i_doconv: 3904 stmfd sp!, {r4, lr} 3905 mov r1, #0x4f000000 @ (float)maxint 3906 mov r4, r0 3907 bl __aeabi_fcmpge @ is arg >= maxint? 3908 cmp r0, #0 @ nonzero == yes 3909 mvnne r0, #0x80000000 @ return maxint (7fffffff) 3910 ldmnefd sp!, {r4, pc} 3911 3912 mov r0, r4 @ recover arg 3913 mov r1, #0xcf000000 @ (float)minint 3914 bl __aeabi_fcmple @ is arg <= minint? 3915 cmp r0, #0 @ nonzero == yes 3916 movne r0, #0x80000000 @ return minint (80000000) 3917 ldmnefd sp!, {r4, pc} 3918 3919 mov r0, r4 @ recover arg 3920 mov r1, r4 3921 bl __aeabi_fcmpeq @ is arg == self? 3922 cmp r0, #0 @ zero == no 3923 ldmeqfd sp!, {r4, pc} @ return zero for NaN 3924 3925 mov r0, r4 @ recover arg 3926 bl __aeabi_f2iz @ convert float to int 3927 ldmfd sp!, {r4, pc} 3928#endif 3929 3930 3931/* ------------------------------ */ 3932 .balign 64 3933.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3934/* File: armv5te/OP_FLOAT_TO_LONG.S */ 3935@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3936/* File: armv5te/unopWider.S */ 3937 /* 3938 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3939 * that specifies an instruction that performs "result = op r0", where 3940 * "result" is a 64-bit quantity in r0/r1. 
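     *
     * This particular expansion (float-to-long) must follow Java
     * conversion rules: NaN becomes 0 and out-of-range values saturate to
     * the most negative/positive long. That is why it calls the local
     * f2l_doconv helper further down rather than __aeabi_f2lz directly
     * (the #if 0 f2i_doconv block above shows the same idea for 32 bits).
     * Roughly, in pseudo-C:
     *
     *     if (x != x)                return 0;           // NaN
     *     if (x >= (float)LLONG_MAX) return LLONG_MAX;   // too big / +inf
     *     if (x <= (float)LLONG_MIN) return LLONG_MIN;   // too small / -inf
     *     return (s8) x;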
3941 * 3942 * For: int-to-long, int-to-double, float-to-long, float-to-double 3943 */ 3944 /* unop vA, vB */ 3945 mov r9, rINST, lsr #8 @ r9<- A+ 3946 mov r3, rINST, lsr #12 @ r3<- B 3947 and r9, r9, #15 3948 GET_VREG(r0, r3) @ r0<- vB 3949 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3950 @ optional op; may set condition codes 3951 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3952 bl f2l_doconv @ r0<- op, r0-r3 changed 3953 GET_INST_OPCODE(ip) @ extract opcode from rINST 3954 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3955 GOTO_OPCODE(ip) @ jump to next instruction 3956 /* 10-11 instructions */ 3957 3958 3959 3960/* ------------------------------ */ 3961 .balign 64 3962.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3963/* File: armv5te/OP_FLOAT_TO_DOUBLE.S */ 3964/* File: armv5te/unopWider.S */ 3965 /* 3966 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3967 * that specifies an instruction that performs "result = op r0", where 3968 * "result" is a 64-bit quantity in r0/r1. 3969 * 3970 * For: int-to-long, int-to-double, float-to-long, float-to-double 3971 */ 3972 /* unop vA, vB */ 3973 mov r9, rINST, lsr #8 @ r9<- A+ 3974 mov r3, rINST, lsr #12 @ r3<- B 3975 and r9, r9, #15 3976 GET_VREG(r0, r3) @ r0<- vB 3977 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3978 @ optional op; may set condition codes 3979 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3980 bl __aeabi_f2d @ r0<- op, r0-r3 changed 3981 GET_INST_OPCODE(ip) @ extract opcode from rINST 3982 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3983 GOTO_OPCODE(ip) @ jump to next instruction 3984 /* 10-11 instructions */ 3985 3986 3987/* ------------------------------ */ 3988 .balign 64 3989.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3990/* File: armv5te/OP_DOUBLE_TO_INT.S */ 3991/* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3992/* File: armv5te/unopNarrower.S */ 3993 /* 3994 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3995 * that specifies an instruction that performs "result = op r0/r1", where 3996 * "result" is a 32-bit quantity in r0. 3997 * 3998 * For: long-to-float, double-to-int, double-to-float 3999 * 4000 * (This would work for long-to-int, but that instruction is actually 4001 * an exact match for OP_MOVE.) 4002 */ 4003 /* unop vA, vB */ 4004 mov r3, rINST, lsr #12 @ r3<- B 4005 mov r9, rINST, lsr #8 @ r9<- A+ 4006 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4007 and r9, r9, #15 4008 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 4009 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4010 @ optional op; may set condition codes 4011 bl __aeabi_d2iz @ r0<- op, r0-r3 changed 4012 GET_INST_OPCODE(ip) @ extract opcode from rINST 4013 SET_VREG(r0, r9) @ vA<- r0 4014 GOTO_OPCODE(ip) @ jump to next instruction 4015 /* 10-11 instructions */ 4016 4017 4018#if 0 4019@include "armv5te/unopNarrower.S" {"instr":"bl d2i_doconv"} 4020@break 4021/* 4022 * Convert the double in r0/r1 to an int in r0. 4023 * 4024 * We have to clip values to int min/max per the specification. The 4025 * expected common case is a "reasonable" value that converts directly 4026 * to modest integer. The EABI convert function isn't doing this for us. 4027 */ 4028d2i_doconv: 4029 stmfd sp!, {r4, r5, lr} @ save regs 4030 ldr r2, .LOP_DOUBLE_TO_INT_maxlo @ (double)maxint, lo 4031 ldr r3, .LOP_DOUBLE_TO_INT_maxhi @ (double)maxint, hi 4032 sub sp, sp, #4 @ align for EABI 4033 mov r4, r0 @ save r0 4034 mov r5, r1 @ and r1 4035 bl __aeabi_dcmpge @ is arg >= maxint? 
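    /*
     * __aeabi_dcmpge takes its two doubles in r0/r1 and r2/r3 and returns
     * a 0/1 result in r0, hence the "cmp r0, #0" after each comparison in
     * this routine. The "sub sp, sp, #4" above keeps sp 8-byte aligned
     * across the external calls, as the EABI requires, since the stmfd
     * pushed three registers (12 bytes).
     */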
4036 cmp r0, #0 @ nonzero == yes 4037 mvnne r0, #0x80000000 @ return maxint (7fffffff) 4038 bne 1f 4039 4040 mov r0, r4 @ recover arg 4041 mov r1, r5 4042 ldr r3, .LOP_DOUBLE_TO_INT_min @ (double)minint, hi 4043 mov r2, #0 @ (double)minint, lo 4044 bl __aeabi_dcmple @ is arg <= minint? 4045 cmp r0, #0 @ nonzero == yes 4046 movne r0, #0x80000000 @ return minint (80000000) 4047 bne 1f 4048 4049 mov r0, r4 @ recover arg 4050 mov r1, r5 4051 mov r2, r4 @ compare against self 4052 mov r3, r5 4053 bl __aeabi_dcmpeq @ is arg == self? 4054 cmp r0, #0 @ zero == no 4055 beq 1f @ return zero for NaN 4056 4057 mov r0, r4 @ recover arg 4058 mov r1, r5 4059 bl __aeabi_d2iz @ convert double to int 4060 40611: 4062 add sp, sp, #4 4063 ldmfd sp!, {r4, r5, pc} 4064 4065.LOP_DOUBLE_TO_INT_maxlo: 4066 .word 0xffc00000 @ maxint, as a double (low word) 4067.LOP_DOUBLE_TO_INT_maxhi: 4068 .word 0x41dfffff @ maxint, as a double (high word) 4069.LOP_DOUBLE_TO_INT_min: 4070 .word 0xc1e00000 @ minint, as a double (high word) 4071#endif 4072 4073 4074/* ------------------------------ */ 4075 .balign 64 4076.L_OP_DOUBLE_TO_LONG: /* 0x8b */ 4077/* File: armv5te/OP_DOUBLE_TO_LONG.S */ 4078@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"} 4079/* File: armv5te/unopWide.S */ 4080 /* 4081 * Generic 64-bit unary operation. Provide an "instr" line that 4082 * specifies an instruction that performs "result = op r0/r1". 4083 * This could be an ARM instruction or a function call. 4084 * 4085 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 4086 */ 4087 /* unop vA, vB */ 4088 mov r9, rINST, lsr #8 @ r9<- A+ 4089 mov r3, rINST, lsr #12 @ r3<- B 4090 and r9, r9, #15 4091 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4092 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 4093 ldmia r3, {r0-r1} @ r0/r1<- vAA 4094 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4095 @ optional op; may set condition codes 4096 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 4097 GET_INST_OPCODE(ip) @ extract opcode from rINST 4098 stmia r9, {r0-r1} @ vAA<- r0/r1 4099 GOTO_OPCODE(ip) @ jump to next instruction 4100 /* 12-13 instructions */ 4101 4102 4103 4104 4105/* ------------------------------ */ 4106 .balign 64 4107.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 4108/* File: armv5te/OP_DOUBLE_TO_FLOAT.S */ 4109/* File: armv5te/unopNarrower.S */ 4110 /* 4111 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 4112 * that specifies an instruction that performs "result = op r0/r1", where 4113 * "result" is a 32-bit quantity in r0. 4114 * 4115 * For: long-to-float, double-to-int, double-to-float 4116 * 4117 * (This would work for long-to-int, but that instruction is actually 4118 * an exact match for OP_MOVE.) 4119 */ 4120 /* unop vA, vB */ 4121 mov r3, rINST, lsr #12 @ r3<- B 4122 mov r9, rINST, lsr #8 @ r9<- A+ 4123 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4124 and r9, r9, #15 4125 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 4126 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4127 @ optional op; may set condition codes 4128 bl __aeabi_d2f @ r0<- op, r0-r3 changed 4129 GET_INST_OPCODE(ip) @ extract opcode from rINST 4130 SET_VREG(r0, r9) @ vA<- r0 4131 GOTO_OPCODE(ip) @ jump to next instruction 4132 /* 10-11 instructions */ 4133 4134 4135/* ------------------------------ */ 4136 .balign 64 4137.L_OP_INT_TO_BYTE: /* 0x8d */ 4138/* File: armv5te/OP_INT_TO_BYTE.S */ 4139/* File: armv5te/unop.S */ 4140 /* 4141 * Generic 32-bit unary operation. Provide an "instr" line that 4142 * specifies an instruction that performs "result = op r0". 
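     * (For int-to-byte and int-to-short the "op" here is a shift pair,
     * asl then asr, to sign-extend, and asl then lsr for the
     * zero-extending int-to-char case, since the sxtb/sxth/uxth extract
     * instructions only exist from ARMv6 on.)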
4143 * This could be an ARM instruction or a function call. 4144 * 4145 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4146 * int-to-byte, int-to-char, int-to-short 4147 */ 4148 /* unop vA, vB */ 4149 mov r3, rINST, lsr #12 @ r3<- B 4150 mov r9, rINST, lsr #8 @ r9<- A+ 4151 GET_VREG(r0, r3) @ r0<- vB 4152 and r9, r9, #15 4153 mov r0, r0, asl #24 @ optional op; may set condition codes 4154 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4155 mov r0, r0, asr #24 @ r0<- op, r0-r3 changed 4156 GET_INST_OPCODE(ip) @ extract opcode from rINST 4157 SET_VREG(r0, r9) @ vAA<- r0 4158 GOTO_OPCODE(ip) @ jump to next instruction 4159 /* 9-10 instructions */ 4160 4161 4162/* ------------------------------ */ 4163 .balign 64 4164.L_OP_INT_TO_CHAR: /* 0x8e */ 4165/* File: armv5te/OP_INT_TO_CHAR.S */ 4166/* File: armv5te/unop.S */ 4167 /* 4168 * Generic 32-bit unary operation. Provide an "instr" line that 4169 * specifies an instruction that performs "result = op r0". 4170 * This could be an ARM instruction or a function call. 4171 * 4172 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4173 * int-to-byte, int-to-char, int-to-short 4174 */ 4175 /* unop vA, vB */ 4176 mov r3, rINST, lsr #12 @ r3<- B 4177 mov r9, rINST, lsr #8 @ r9<- A+ 4178 GET_VREG(r0, r3) @ r0<- vB 4179 and r9, r9, #15 4180 mov r0, r0, asl #16 @ optional op; may set condition codes 4181 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4182 mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed 4183 GET_INST_OPCODE(ip) @ extract opcode from rINST 4184 SET_VREG(r0, r9) @ vAA<- r0 4185 GOTO_OPCODE(ip) @ jump to next instruction 4186 /* 9-10 instructions */ 4187 4188 4189/* ------------------------------ */ 4190 .balign 64 4191.L_OP_INT_TO_SHORT: /* 0x8f */ 4192/* File: armv5te/OP_INT_TO_SHORT.S */ 4193/* File: armv5te/unop.S */ 4194 /* 4195 * Generic 32-bit unary operation. Provide an "instr" line that 4196 * specifies an instruction that performs "result = op r0". 4197 * This could be an ARM instruction or a function call. 4198 * 4199 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4200 * int-to-byte, int-to-char, int-to-short 4201 */ 4202 /* unop vA, vB */ 4203 mov r3, rINST, lsr #12 @ r3<- B 4204 mov r9, rINST, lsr #8 @ r9<- A+ 4205 GET_VREG(r0, r3) @ r0<- vB 4206 and r9, r9, #15 4207 mov r0, r0, asl #16 @ optional op; may set condition codes 4208 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4209 mov r0, r0, asr #16 @ r0<- op, r0-r3 changed 4210 GET_INST_OPCODE(ip) @ extract opcode from rINST 4211 SET_VREG(r0, r9) @ vAA<- r0 4212 GOTO_OPCODE(ip) @ jump to next instruction 4213 /* 9-10 instructions */ 4214 4215 4216/* ------------------------------ */ 4217 .balign 64 4218.L_OP_ADD_INT: /* 0x90 */ 4219/* File: armv5te/OP_ADD_INT.S */ 4220/* File: armv5te/binop.S */ 4221 /* 4222 * Generic 32-bit binary operation. Provide an "instr" line that 4223 * specifies an instruction that performs "result = r0 op r1". 4224 * This could be an ARM instruction or a function call. (If the result 4225 * comes back in a register other than r0, you can override "result".) 4226 * 4227 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4228 * vCC (r1). Useful for integer division and modulus. Note that we 4229 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4230 * handles it correctly. 
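     *
     * Illustrative C sketch of one expansion (add-int; fp[] is the Dalvik
     * register file, names simplified):
     *
     *     u2 ccbb = pc[1];                 // FETCH(r0, 1)
     *     u4 vBB  = fp[ccbb & 0xff];
     *     u4 vCC  = fp[ccbb >> 8];
     *     fp[AA]  = vBB + vCC;             // "add r0, r0, r1"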
4231 * 4232 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4233 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4234 * mul-float, div-float, rem-float 4235 */ 4236 /* binop vAA, vBB, vCC */ 4237 FETCH(r0, 1) @ r0<- CCBB 4238 mov r9, rINST, lsr #8 @ r9<- AA 4239 mov r3, r0, lsr #8 @ r3<- CC 4240 and r2, r0, #255 @ r2<- BB 4241 GET_VREG(r1, r3) @ r1<- vCC 4242 GET_VREG(r0, r2) @ r0<- vBB 4243 .if 0 4244 cmp r1, #0 @ is second operand zero? 4245 beq common_errDivideByZero 4246 .endif 4247 4248 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4249 @ optional op; may set condition codes 4250 add r0, r0, r1 @ r0<- op, r0-r3 changed 4251 GET_INST_OPCODE(ip) @ extract opcode from rINST 4252 SET_VREG(r0, r9) @ vAA<- r0 4253 GOTO_OPCODE(ip) @ jump to next instruction 4254 /* 11-14 instructions */ 4255 4256 4257 4258/* ------------------------------ */ 4259 .balign 64 4260.L_OP_SUB_INT: /* 0x91 */ 4261/* File: armv5te/OP_SUB_INT.S */ 4262/* File: armv5te/binop.S */ 4263 /* 4264 * Generic 32-bit binary operation. Provide an "instr" line that 4265 * specifies an instruction that performs "result = r0 op r1". 4266 * This could be an ARM instruction or a function call. (If the result 4267 * comes back in a register other than r0, you can override "result".) 4268 * 4269 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4270 * vCC (r1). Useful for integer division and modulus. Note that we 4271 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4272 * handles it correctly. 4273 * 4274 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4275 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4276 * mul-float, div-float, rem-float 4277 */ 4278 /* binop vAA, vBB, vCC */ 4279 FETCH(r0, 1) @ r0<- CCBB 4280 mov r9, rINST, lsr #8 @ r9<- AA 4281 mov r3, r0, lsr #8 @ r3<- CC 4282 and r2, r0, #255 @ r2<- BB 4283 GET_VREG(r1, r3) @ r1<- vCC 4284 GET_VREG(r0, r2) @ r0<- vBB 4285 .if 0 4286 cmp r1, #0 @ is second operand zero? 4287 beq common_errDivideByZero 4288 .endif 4289 4290 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4291 @ optional op; may set condition codes 4292 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4293 GET_INST_OPCODE(ip) @ extract opcode from rINST 4294 SET_VREG(r0, r9) @ vAA<- r0 4295 GOTO_OPCODE(ip) @ jump to next instruction 4296 /* 11-14 instructions */ 4297 4298 4299 4300/* ------------------------------ */ 4301 .balign 64 4302.L_OP_MUL_INT: /* 0x92 */ 4303/* File: armv5te/OP_MUL_INT.S */ 4304/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4305/* File: armv5te/binop.S */ 4306 /* 4307 * Generic 32-bit binary operation. Provide an "instr" line that 4308 * specifies an instruction that performs "result = r0 op r1". 4309 * This could be an ARM instruction or a function call. (If the result 4310 * comes back in a register other than r0, you can override "result".) 4311 * 4312 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4313 * vCC (r1). Useful for integer division and modulus. Note that we 4314 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4315 * handles it correctly. 
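     *
     * (The "must be mul r0, r1, r0" note above reflects an ARMv4/v5
     * restriction: MUL requires Rd and Rm to be different registers, so
     * the destination cannot also be the first source operand and the
     * template swaps the operands instead.)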
4316 * 4317 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4318 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4319 * mul-float, div-float, rem-float 4320 */ 4321 /* binop vAA, vBB, vCC */ 4322 FETCH(r0, 1) @ r0<- CCBB 4323 mov r9, rINST, lsr #8 @ r9<- AA 4324 mov r3, r0, lsr #8 @ r3<- CC 4325 and r2, r0, #255 @ r2<- BB 4326 GET_VREG(r1, r3) @ r1<- vCC 4327 GET_VREG(r0, r2) @ r0<- vBB 4328 .if 0 4329 cmp r1, #0 @ is second operand zero? 4330 beq common_errDivideByZero 4331 .endif 4332 4333 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4334 @ optional op; may set condition codes 4335 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4336 GET_INST_OPCODE(ip) @ extract opcode from rINST 4337 SET_VREG(r0, r9) @ vAA<- r0 4338 GOTO_OPCODE(ip) @ jump to next instruction 4339 /* 11-14 instructions */ 4340 4341 4342 4343/* ------------------------------ */ 4344 .balign 64 4345.L_OP_DIV_INT: /* 0x93 */ 4346/* File: armv5te/OP_DIV_INT.S */ 4347/* File: armv5te/binop.S */ 4348 /* 4349 * Generic 32-bit binary operation. Provide an "instr" line that 4350 * specifies an instruction that performs "result = r0 op r1". 4351 * This could be an ARM instruction or a function call. (If the result 4352 * comes back in a register other than r0, you can override "result".) 4353 * 4354 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4355 * vCC (r1). Useful for integer division and modulus. Note that we 4356 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4357 * handles it correctly. 4358 * 4359 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4360 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4361 * mul-float, div-float, rem-float 4362 */ 4363 /* binop vAA, vBB, vCC */ 4364 FETCH(r0, 1) @ r0<- CCBB 4365 mov r9, rINST, lsr #8 @ r9<- AA 4366 mov r3, r0, lsr #8 @ r3<- CC 4367 and r2, r0, #255 @ r2<- BB 4368 GET_VREG(r1, r3) @ r1<- vCC 4369 GET_VREG(r0, r2) @ r0<- vBB 4370 .if 1 4371 cmp r1, #0 @ is second operand zero? 4372 beq common_errDivideByZero 4373 .endif 4374 4375 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4376 @ optional op; may set condition codes 4377 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4378 GET_INST_OPCODE(ip) @ extract opcode from rINST 4379 SET_VREG(r0, r9) @ vAA<- r0 4380 GOTO_OPCODE(ip) @ jump to next instruction 4381 /* 11-14 instructions */ 4382 4383 4384 4385/* ------------------------------ */ 4386 .balign 64 4387.L_OP_REM_INT: /* 0x94 */ 4388/* File: armv5te/OP_REM_INT.S */ 4389/* idivmod returns quotient in r0 and remainder in r1 */ 4390/* File: armv5te/binop.S */ 4391 /* 4392 * Generic 32-bit binary operation. Provide an "instr" line that 4393 * specifies an instruction that performs "result = r0 op r1". 4394 * This could be an ARM instruction or a function call. (If the result 4395 * comes back in a register other than r0, you can override "result".) 4396 * 4397 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4398 * vCC (r1). Useful for integer division and modulus. Note that we 4399 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4400 * handles it correctly. 
4401 * 4402 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4403 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4404 * mul-float, div-float, rem-float 4405 */ 4406 /* binop vAA, vBB, vCC */ 4407 FETCH(r0, 1) @ r0<- CCBB 4408 mov r9, rINST, lsr #8 @ r9<- AA 4409 mov r3, r0, lsr #8 @ r3<- CC 4410 and r2, r0, #255 @ r2<- BB 4411 GET_VREG(r1, r3) @ r1<- vCC 4412 GET_VREG(r0, r2) @ r0<- vBB 4413 .if 1 4414 cmp r1, #0 @ is second operand zero? 4415 beq common_errDivideByZero 4416 .endif 4417 4418 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4419 @ optional op; may set condition codes 4420 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4421 GET_INST_OPCODE(ip) @ extract opcode from rINST 4422 SET_VREG(r1, r9) @ vAA<- r1 4423 GOTO_OPCODE(ip) @ jump to next instruction 4424 /* 11-14 instructions */ 4425 4426 4427 4428/* ------------------------------ */ 4429 .balign 64 4430.L_OP_AND_INT: /* 0x95 */ 4431/* File: armv5te/OP_AND_INT.S */ 4432/* File: armv5te/binop.S */ 4433 /* 4434 * Generic 32-bit binary operation. Provide an "instr" line that 4435 * specifies an instruction that performs "result = r0 op r1". 4436 * This could be an ARM instruction or a function call. (If the result 4437 * comes back in a register other than r0, you can override "result".) 4438 * 4439 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4440 * vCC (r1). Useful for integer division and modulus. Note that we 4441 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4442 * handles it correctly. 4443 * 4444 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4445 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4446 * mul-float, div-float, rem-float 4447 */ 4448 /* binop vAA, vBB, vCC */ 4449 FETCH(r0, 1) @ r0<- CCBB 4450 mov r9, rINST, lsr #8 @ r9<- AA 4451 mov r3, r0, lsr #8 @ r3<- CC 4452 and r2, r0, #255 @ r2<- BB 4453 GET_VREG(r1, r3) @ r1<- vCC 4454 GET_VREG(r0, r2) @ r0<- vBB 4455 .if 0 4456 cmp r1, #0 @ is second operand zero? 4457 beq common_errDivideByZero 4458 .endif 4459 4460 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4461 @ optional op; may set condition codes 4462 and r0, r0, r1 @ r0<- op, r0-r3 changed 4463 GET_INST_OPCODE(ip) @ extract opcode from rINST 4464 SET_VREG(r0, r9) @ vAA<- r0 4465 GOTO_OPCODE(ip) @ jump to next instruction 4466 /* 11-14 instructions */ 4467 4468 4469 4470/* ------------------------------ */ 4471 .balign 64 4472.L_OP_OR_INT: /* 0x96 */ 4473/* File: armv5te/OP_OR_INT.S */ 4474/* File: armv5te/binop.S */ 4475 /* 4476 * Generic 32-bit binary operation. Provide an "instr" line that 4477 * specifies an instruction that performs "result = r0 op r1". 4478 * This could be an ARM instruction or a function call. (If the result 4479 * comes back in a register other than r0, you can override "result".) 4480 * 4481 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4482 * vCC (r1). Useful for integer division and modulus. Note that we 4483 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4484 * handles it correctly. 
4485 * 4486 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4487 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4488 * mul-float, div-float, rem-float 4489 */ 4490 /* binop vAA, vBB, vCC */ 4491 FETCH(r0, 1) @ r0<- CCBB 4492 mov r9, rINST, lsr #8 @ r9<- AA 4493 mov r3, r0, lsr #8 @ r3<- CC 4494 and r2, r0, #255 @ r2<- BB 4495 GET_VREG(r1, r3) @ r1<- vCC 4496 GET_VREG(r0, r2) @ r0<- vBB 4497 .if 0 4498 cmp r1, #0 @ is second operand zero? 4499 beq common_errDivideByZero 4500 .endif 4501 4502 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4503 @ optional op; may set condition codes 4504 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4505 GET_INST_OPCODE(ip) @ extract opcode from rINST 4506 SET_VREG(r0, r9) @ vAA<- r0 4507 GOTO_OPCODE(ip) @ jump to next instruction 4508 /* 11-14 instructions */ 4509 4510 4511 4512/* ------------------------------ */ 4513 .balign 64 4514.L_OP_XOR_INT: /* 0x97 */ 4515/* File: armv5te/OP_XOR_INT.S */ 4516/* File: armv5te/binop.S */ 4517 /* 4518 * Generic 32-bit binary operation. Provide an "instr" line that 4519 * specifies an instruction that performs "result = r0 op r1". 4520 * This could be an ARM instruction or a function call. (If the result 4521 * comes back in a register other than r0, you can override "result".) 4522 * 4523 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4524 * vCC (r1). Useful for integer division and modulus. Note that we 4525 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4526 * handles it correctly. 4527 * 4528 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4529 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4530 * mul-float, div-float, rem-float 4531 */ 4532 /* binop vAA, vBB, vCC */ 4533 FETCH(r0, 1) @ r0<- CCBB 4534 mov r9, rINST, lsr #8 @ r9<- AA 4535 mov r3, r0, lsr #8 @ r3<- CC 4536 and r2, r0, #255 @ r2<- BB 4537 GET_VREG(r1, r3) @ r1<- vCC 4538 GET_VREG(r0, r2) @ r0<- vBB 4539 .if 0 4540 cmp r1, #0 @ is second operand zero? 4541 beq common_errDivideByZero 4542 .endif 4543 4544 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4545 @ optional op; may set condition codes 4546 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4547 GET_INST_OPCODE(ip) @ extract opcode from rINST 4548 SET_VREG(r0, r9) @ vAA<- r0 4549 GOTO_OPCODE(ip) @ jump to next instruction 4550 /* 11-14 instructions */ 4551 4552 4553 4554/* ------------------------------ */ 4555 .balign 64 4556.L_OP_SHL_INT: /* 0x98 */ 4557/* File: armv5te/OP_SHL_INT.S */ 4558/* File: armv5te/binop.S */ 4559 /* 4560 * Generic 32-bit binary operation. Provide an "instr" line that 4561 * specifies an instruction that performs "result = r0 op r1". 4562 * This could be an ARM instruction or a function call. (If the result 4563 * comes back in a register other than r0, you can override "result".) 4564 * 4565 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4566 * vCC (r1). Useful for integer division and modulus. Note that we 4567 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4568 * handles it correctly. 
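     *
     * For the shift forms the "optional op" slot masks the count with
     * "and r1, r1, #31": Java only honours the low five bits of a 32-bit
     * shift count, whereas an ARM register-specified shift uses the low
     * byte and would yield 0 (or all sign bits) for counts of 32 or more.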
4569 * 4570 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4571 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4572 * mul-float, div-float, rem-float 4573 */ 4574 /* binop vAA, vBB, vCC */ 4575 FETCH(r0, 1) @ r0<- CCBB 4576 mov r9, rINST, lsr #8 @ r9<- AA 4577 mov r3, r0, lsr #8 @ r3<- CC 4578 and r2, r0, #255 @ r2<- BB 4579 GET_VREG(r1, r3) @ r1<- vCC 4580 GET_VREG(r0, r2) @ r0<- vBB 4581 .if 0 4582 cmp r1, #0 @ is second operand zero? 4583 beq common_errDivideByZero 4584 .endif 4585 4586 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4587 and r1, r1, #31 @ optional op; may set condition codes 4588 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 4589 GET_INST_OPCODE(ip) @ extract opcode from rINST 4590 SET_VREG(r0, r9) @ vAA<- r0 4591 GOTO_OPCODE(ip) @ jump to next instruction 4592 /* 11-14 instructions */ 4593 4594 4595 4596/* ------------------------------ */ 4597 .balign 64 4598.L_OP_SHR_INT: /* 0x99 */ 4599/* File: armv5te/OP_SHR_INT.S */ 4600/* File: armv5te/binop.S */ 4601 /* 4602 * Generic 32-bit binary operation. Provide an "instr" line that 4603 * specifies an instruction that performs "result = r0 op r1". 4604 * This could be an ARM instruction or a function call. (If the result 4605 * comes back in a register other than r0, you can override "result".) 4606 * 4607 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4608 * vCC (r1). Useful for integer division and modulus. Note that we 4609 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4610 * handles it correctly. 4611 * 4612 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4613 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4614 * mul-float, div-float, rem-float 4615 */ 4616 /* binop vAA, vBB, vCC */ 4617 FETCH(r0, 1) @ r0<- CCBB 4618 mov r9, rINST, lsr #8 @ r9<- AA 4619 mov r3, r0, lsr #8 @ r3<- CC 4620 and r2, r0, #255 @ r2<- BB 4621 GET_VREG(r1, r3) @ r1<- vCC 4622 GET_VREG(r0, r2) @ r0<- vBB 4623 .if 0 4624 cmp r1, #0 @ is second operand zero? 4625 beq common_errDivideByZero 4626 .endif 4627 4628 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4629 and r1, r1, #31 @ optional op; may set condition codes 4630 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 4631 GET_INST_OPCODE(ip) @ extract opcode from rINST 4632 SET_VREG(r0, r9) @ vAA<- r0 4633 GOTO_OPCODE(ip) @ jump to next instruction 4634 /* 11-14 instructions */ 4635 4636 4637 4638/* ------------------------------ */ 4639 .balign 64 4640.L_OP_USHR_INT: /* 0x9a */ 4641/* File: armv5te/OP_USHR_INT.S */ 4642/* File: armv5te/binop.S */ 4643 /* 4644 * Generic 32-bit binary operation. Provide an "instr" line that 4645 * specifies an instruction that performs "result = r0 op r1". 4646 * This could be an ARM instruction or a function call. (If the result 4647 * comes back in a register other than r0, you can override "result".) 4648 * 4649 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4650 * vCC (r1). Useful for integer division and modulus. Note that we 4651 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4652 * handles it correctly. 
4653 * 4654 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4655 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4656 * mul-float, div-float, rem-float 4657 */ 4658 /* binop vAA, vBB, vCC */ 4659 FETCH(r0, 1) @ r0<- CCBB 4660 mov r9, rINST, lsr #8 @ r9<- AA 4661 mov r3, r0, lsr #8 @ r3<- CC 4662 and r2, r0, #255 @ r2<- BB 4663 GET_VREG(r1, r3) @ r1<- vCC 4664 GET_VREG(r0, r2) @ r0<- vBB 4665 .if 0 4666 cmp r1, #0 @ is second operand zero? 4667 beq common_errDivideByZero 4668 .endif 4669 4670 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4671 and r1, r1, #31 @ optional op; may set condition codes 4672 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 4673 GET_INST_OPCODE(ip) @ extract opcode from rINST 4674 SET_VREG(r0, r9) @ vAA<- r0 4675 GOTO_OPCODE(ip) @ jump to next instruction 4676 /* 11-14 instructions */ 4677 4678 4679 4680/* ------------------------------ */ 4681 .balign 64 4682.L_OP_ADD_LONG: /* 0x9b */ 4683/* File: armv5te/OP_ADD_LONG.S */ 4684/* File: armv5te/binopWide.S */ 4685 /* 4686 * Generic 64-bit binary operation. Provide an "instr" line that 4687 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4688 * This could be an ARM instruction or a function call. (If the result 4689 * comes back in a register other than r0, you can override "result".) 4690 * 4691 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4692 * vCC (r1). Useful for integer division and modulus. 4693 * 4694 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4695 * xor-long, add-double, sub-double, mul-double, div-double, 4696 * rem-double 4697 * 4698 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4699 */ 4700 /* binop vAA, vBB, vCC */ 4701 FETCH(r0, 1) @ r0<- CCBB 4702 mov r9, rINST, lsr #8 @ r9<- AA 4703 and r2, r0, #255 @ r2<- BB 4704 mov r3, r0, lsr #8 @ r3<- CC 4705 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4706 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4707 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4708 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4709 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4710 .if 0 4711 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4712 beq common_errDivideByZero 4713 .endif 4714 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4715 4716 adds r0, r0, r2 @ optional op; may set condition codes 4717 adc r1, r1, r3 @ result<- op, r0-r3 changed 4718 GET_INST_OPCODE(ip) @ extract opcode from rINST 4719 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4720 GOTO_OPCODE(ip) @ jump to next instruction 4721 /* 14-17 instructions */ 4722 4723 4724 4725/* ------------------------------ */ 4726 .balign 64 4727.L_OP_SUB_LONG: /* 0x9c */ 4728/* File: armv5te/OP_SUB_LONG.S */ 4729/* File: armv5te/binopWide.S */ 4730 /* 4731 * Generic 64-bit binary operation. Provide an "instr" line that 4732 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4733 * This could be an ARM instruction or a function call. (If the result 4734 * comes back in a register other than r0, you can override "result".) 4735 * 4736 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4737 * vCC (r1). Useful for integer division and modulus. 4738 * 4739 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4740 * xor-long, add-double, sub-double, mul-double, div-double, 4741 * rem-double 4742 * 4743 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
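     *
     * The 64-bit add/sub expansions pair a flag-setting low-word op with a
     * carry-consuming high-word op (adds/adc, subs/sbc), which is why the
     * "optional op" slot is documented as being allowed to set the
     * condition codes.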
4744 */ 4745 /* binop vAA, vBB, vCC */ 4746 FETCH(r0, 1) @ r0<- CCBB 4747 mov r9, rINST, lsr #8 @ r9<- AA 4748 and r2, r0, #255 @ r2<- BB 4749 mov r3, r0, lsr #8 @ r3<- CC 4750 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4751 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4752 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4753 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4754 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4755 .if 0 4756 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4757 beq common_errDivideByZero 4758 .endif 4759 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4760 4761 subs r0, r0, r2 @ optional op; may set condition codes 4762 sbc r1, r1, r3 @ result<- op, r0-r3 changed 4763 GET_INST_OPCODE(ip) @ extract opcode from rINST 4764 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4765 GOTO_OPCODE(ip) @ jump to next instruction 4766 /* 14-17 instructions */ 4767 4768 4769 4770/* ------------------------------ */ 4771 .balign 64 4772.L_OP_MUL_LONG: /* 0x9d */ 4773/* File: armv5te/OP_MUL_LONG.S */ 4774 /* 4775 * Signed 64-bit integer multiply. 4776 * 4777 * Consider WXxYZ (r1r0 x r3r2) with a long multiply: 4778 * WX 4779 * x YZ 4780 * -------- 4781 * ZW ZX 4782 * YW YX 4783 * 4784 * The low word of the result holds ZX, the high word holds 4785 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because 4786 * it doesn't fit in the low 64 bits. 4787 * 4788 * Unlike most ARM math operations, multiply instructions have 4789 * restrictions on using the same register more than once (Rd and Rm 4790 * cannot be the same). 4791 */ 4792 /* mul-long vAA, vBB, vCC */ 4793 FETCH(r0, 1) @ r0<- CCBB 4794 and r2, r0, #255 @ r2<- BB 4795 mov r3, r0, lsr #8 @ r3<- CC 4796 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4797 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4798 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4799 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4800 mul ip, r2, r1 @ ip<- ZxW 4801 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 4802 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 4803 mov r0, rINST, lsr #8 @ r0<- AA 4804 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 4805 add r0, rFP, r0, lsl #2 @ r0<- &fp[AA] 4806 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4807 b .LOP_MUL_LONG_finish 4808 4809/* ------------------------------ */ 4810 .balign 64 4811.L_OP_DIV_LONG: /* 0x9e */ 4812/* File: armv5te/OP_DIV_LONG.S */ 4813/* File: armv5te/binopWide.S */ 4814 /* 4815 * Generic 64-bit binary operation. Provide an "instr" line that 4816 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4817 * This could be an ARM instruction or a function call. (If the result 4818 * comes back in a register other than r0, you can override "result".) 4819 * 4820 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4821 * vCC (r1). Useful for integer division and modulus. 4822 * 4823 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4824 * xor-long, add-double, sub-double, mul-double, div-double, 4825 * rem-double 4826 * 4827 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4828 */ 4829 /* binop vAA, vBB, vCC */ 4830 FETCH(r0, 1) @ r0<- CCBB 4831 mov r9, rINST, lsr #8 @ r9<- AA 4832 and r2, r0, #255 @ r2<- BB 4833 mov r3, r0, lsr #8 @ r3<- CC 4834 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4835 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4836 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4837 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4838 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4839 .if 1 4840 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
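    @ ("orrs ip, r2, r3" above ORs the two halves of vCC into the scratch
    @ register purely to set the flags: Z is set only when the full 64-bit
    @ divisor is zero, so the beq below catches exactly the divide-by-zero
    @ case.)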
4841 beq common_errDivideByZero 4842 .endif 4843 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4844 4845 @ optional op; may set condition codes 4846 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4847 GET_INST_OPCODE(ip) @ extract opcode from rINST 4848 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4849 GOTO_OPCODE(ip) @ jump to next instruction 4850 /* 14-17 instructions */ 4851 4852 4853 4854/* ------------------------------ */ 4855 .balign 64 4856.L_OP_REM_LONG: /* 0x9f */ 4857/* File: armv5te/OP_REM_LONG.S */ 4858/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 4859/* File: armv5te/binopWide.S */ 4860 /* 4861 * Generic 64-bit binary operation. Provide an "instr" line that 4862 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4863 * This could be an ARM instruction or a function call. (If the result 4864 * comes back in a register other than r0, you can override "result".) 4865 * 4866 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4867 * vCC (r1). Useful for integer division and modulus. 4868 * 4869 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4870 * xor-long, add-double, sub-double, mul-double, div-double, 4871 * rem-double 4872 * 4873 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4874 */ 4875 /* binop vAA, vBB, vCC */ 4876 FETCH(r0, 1) @ r0<- CCBB 4877 mov r9, rINST, lsr #8 @ r9<- AA 4878 and r2, r0, #255 @ r2<- BB 4879 mov r3, r0, lsr #8 @ r3<- CC 4880 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4881 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4882 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4883 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4884 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4885 .if 1 4886 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4887 beq common_errDivideByZero 4888 .endif 4889 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4890 4891 @ optional op; may set condition codes 4892 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4893 GET_INST_OPCODE(ip) @ extract opcode from rINST 4894 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 4895 GOTO_OPCODE(ip) @ jump to next instruction 4896 /* 14-17 instructions */ 4897 4898 4899 4900/* ------------------------------ */ 4901 .balign 64 4902.L_OP_AND_LONG: /* 0xa0 */ 4903/* File: armv5te/OP_AND_LONG.S */ 4904/* File: armv5te/binopWide.S */ 4905 /* 4906 * Generic 64-bit binary operation. Provide an "instr" line that 4907 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4908 * This could be an ARM instruction or a function call. (If the result 4909 * comes back in a register other than r0, you can override "result".) 4910 * 4911 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4912 * vCC (r1). Useful for integer division and modulus. 4913 * 4914 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4915 * xor-long, add-double, sub-double, mul-double, div-double, 4916 * rem-double 4917 * 4918 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4919 */ 4920 /* binop vAA, vBB, vCC */ 4921 FETCH(r0, 1) @ r0<- CCBB 4922 mov r9, rINST, lsr #8 @ r9<- AA 4923 and r2, r0, #255 @ r2<- BB 4924 mov r3, r0, lsr #8 @ r3<- CC 4925 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4926 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4927 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4928 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4929 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4930 .if 0 4931 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
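    @ (Illustrative aside, not generated code: for and-long this zero test
    @ is assembled out by ".if 0".  In div-long/rem-long above,
    @ __aeabi_ldivmod leaves the quotient in r0/r1 and the remainder in
    @ r2/r3, which is why div-long stores r0/r1 while rem-long stores
    @ r2/r3; roughly, in C:
    @     lldiv_t d = lldiv(vBB, vCC);   /* <stdlib.h>; div-long keeps  */
    @                                    /* d.quot, rem-long keeps d.rem */
    @ with vBB/vCC as hypothetical 64-bit operand values.)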
4932 beq common_errDivideByZero 4933 .endif 4934 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4935 4936 and r0, r0, r2 @ optional op; may set condition codes 4937 and r1, r1, r3 @ result<- op, r0-r3 changed 4938 GET_INST_OPCODE(ip) @ extract opcode from rINST 4939 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4940 GOTO_OPCODE(ip) @ jump to next instruction 4941 /* 14-17 instructions */ 4942 4943 4944 4945/* ------------------------------ */ 4946 .balign 64 4947.L_OP_OR_LONG: /* 0xa1 */ 4948/* File: armv5te/OP_OR_LONG.S */ 4949/* File: armv5te/binopWide.S */ 4950 /* 4951 * Generic 64-bit binary operation. Provide an "instr" line that 4952 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4953 * This could be an ARM instruction or a function call. (If the result 4954 * comes back in a register other than r0, you can override "result".) 4955 * 4956 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4957 * vCC (r1). Useful for integer division and modulus. 4958 * 4959 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4960 * xor-long, add-double, sub-double, mul-double, div-double, 4961 * rem-double 4962 * 4963 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4964 */ 4965 /* binop vAA, vBB, vCC */ 4966 FETCH(r0, 1) @ r0<- CCBB 4967 mov r9, rINST, lsr #8 @ r9<- AA 4968 and r2, r0, #255 @ r2<- BB 4969 mov r3, r0, lsr #8 @ r3<- CC 4970 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4971 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4972 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4973 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4974 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4975 .if 0 4976 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4977 beq common_errDivideByZero 4978 .endif 4979 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4980 4981 orr r0, r0, r2 @ optional op; may set condition codes 4982 orr r1, r1, r3 @ result<- op, r0-r3 changed 4983 GET_INST_OPCODE(ip) @ extract opcode from rINST 4984 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4985 GOTO_OPCODE(ip) @ jump to next instruction 4986 /* 14-17 instructions */ 4987 4988 4989 4990/* ------------------------------ */ 4991 .balign 64 4992.L_OP_XOR_LONG: /* 0xa2 */ 4993/* File: armv5te/OP_XOR_LONG.S */ 4994/* File: armv5te/binopWide.S */ 4995 /* 4996 * Generic 64-bit binary operation. Provide an "instr" line that 4997 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4998 * This could be an ARM instruction or a function call. (If the result 4999 * comes back in a register other than r0, you can override "result".) 5000 * 5001 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5002 * vCC (r1). Useful for integer division and modulus. 5003 * 5004 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5005 * xor-long, add-double, sub-double, mul-double, div-double, 5006 * rem-double 5007 * 5008 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5009 */ 5010 /* binop vAA, vBB, vCC */ 5011 FETCH(r0, 1) @ r0<- CCBB 5012 mov r9, rINST, lsr #8 @ r9<- AA 5013 and r2, r0, #255 @ r2<- BB 5014 mov r3, r0, lsr #8 @ r3<- CC 5015 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5016 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5017 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5018 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5019 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5020 .if 0 5021 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5022 beq common_errDivideByZero
5023 .endif
5024 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5025
5026 eor r0, r0, r2 @ optional op; may set condition codes
5027 eor r1, r1, r3 @ result<- op, r0-r3 changed
5028 GET_INST_OPCODE(ip) @ extract opcode from rINST
5029 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5030 GOTO_OPCODE(ip) @ jump to next instruction
5031 /* 14-17 instructions */
5032
5033
5034
5035/* ------------------------------ */
5036 .balign 64
5037.L_OP_SHL_LONG: /* 0xa3 */
5038/* File: armv5te/OP_SHL_LONG.S */
5039 /*
5040 * Long integer shift. This is different from the generic 32/64-bit
5041 * binary operations because vAA/vBB are 64-bit but vCC (the shift
5042 * distance) is 32-bit. Also, Dalvik requires us to mask off the low
5043 * 6 bits of the shift distance.
5044 */
5045 /* shl-long vAA, vBB, vCC */
5046 FETCH(r0, 1) @ r0<- CCBB
5047 mov r9, rINST, lsr #8 @ r9<- AA
5048 and r3, r0, #255 @ r3<- BB
5049 mov r0, r0, lsr #8 @ r0<- CC
5050 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
5051 GET_VREG(r2, r0) @ r2<- vCC
5052 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
5053 and r2, r2, #63 @ r2<- r2 & 0x3f
5054 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5055
5056 mov r1, r1, asl r2 @ r1<- r1 << r2
5057 rsb r3, r2, #32 @ r3<- 32 - r2
5058 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
5059 subs ip, r2, #32 @ ip<- r2 - 32
5060 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
5061 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5062 b .LOP_SHL_LONG_finish
5063
5064/* ------------------------------ */
5065 .balign 64
5066.L_OP_SHR_LONG: /* 0xa4 */
5067/* File: armv5te/OP_SHR_LONG.S */
5068 /*
5069 * Long integer shift. This is different from the generic 32/64-bit
5070 * binary operations because vAA/vBB are 64-bit but vCC (the shift
5071 * distance) is 32-bit. Also, Dalvik requires us to mask off the low
5072 * 6 bits of the shift distance.
5073 */
5074 /* shr-long vAA, vBB, vCC */
5075 FETCH(r0, 1) @ r0<- CCBB
5076 mov r9, rINST, lsr #8 @ r9<- AA
5077 and r3, r0, #255 @ r3<- BB
5078 mov r0, r0, lsr #8 @ r0<- CC
5079 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
5080 GET_VREG(r2, r0) @ r2<- vCC
5081 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
5082 and r2, r2, #63 @ r2<- r2 & 0x3f
5083 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5084
5085 mov r0, r0, lsr r2 @ r0<- r0 >> r2
5086 rsb r3, r2, #32 @ r3<- 32 - r2
5087 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
5088 subs ip, r2, #32 @ ip<- r2 - 32
5089 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
5090 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5091 b .LOP_SHR_LONG_finish
5092
5093/* ------------------------------ */
5094 .balign 64
5095.L_OP_USHR_LONG: /* 0xa5 */
5096/* File: armv5te/OP_USHR_LONG.S */
5097 /*
5098 * Long integer shift. This is different from the generic 32/64-bit
5099 * binary operations because vAA/vBB are 64-bit but vCC (the shift
5100 * distance) is 32-bit. Also, Dalvik requires us to mask off the low
5101 * 6 bits of the shift distance.
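 *
 * (Illustrative aside, not part of the generated template: the shift
 * handlers assemble the 64-bit result from 32-bit pieces.  For ushr-long,
 * roughly, in C, with lo/hi as hypothetical names for the vBB halves and
 * s = vCC & 63:
 *
 *     uint32_t resLo = (s >= 32) ? hi >> (s - 32)
 *                                : (lo >> s) | (s ? hi << (32 - s) : 0);
 *     uint32_t resHi = (s >= 32) ? 0 : hi >> s;
 *
 * The rsb/orr/subs/movpl sequence below computes resLo without a branch;
 * the s == 0 case falls out because a register-specified LSL by 32
 * yields zero on ARM.)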
5102 */
5103 /* ushr-long vAA, vBB, vCC */
5104 FETCH(r0, 1) @ r0<- CCBB
5105 mov r9, rINST, lsr #8 @ r9<- AA
5106 and r3, r0, #255 @ r3<- BB
5107 mov r0, r0, lsr #8 @ r0<- CC
5108 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
5109 GET_VREG(r2, r0) @ r2<- vCC
5110 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
5111 and r2, r2, #63 @ r2<- r2 & 0x3f
5112 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5113
5114 mov r0, r0, lsr r2 @ r0<- r0 >> r2
5115 rsb r3, r2, #32 @ r3<- 32 - r2
5116 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
5117 subs ip, r2, #32 @ ip<- r2 - 32
5118 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
5119 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5120 b .LOP_USHR_LONG_finish
5121
5122/* ------------------------------ */
5123 .balign 64
5124.L_OP_ADD_FLOAT: /* 0xa6 */
5125/* File: armv5te/OP_ADD_FLOAT.S */
5126/* File: armv5te/binop.S */
5127 /*
5128 * Generic 32-bit binary operation. Provide an "instr" line that
5129 * specifies an instruction that performs "result = r0 op r1".
5130 * This could be an ARM instruction or a function call. (If the result
5131 * comes back in a register other than r0, you can override "result".)
5132 *
5133 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5134 * vCC (r1). Useful for integer division and modulus. Note that we
5135 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
5136 * handles it correctly.
5137 *
5138 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
5139 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
5140 * mul-float, div-float, rem-float
5141 */
5142 /* binop vAA, vBB, vCC */
5143 FETCH(r0, 1) @ r0<- CCBB
5144 mov r9, rINST, lsr #8 @ r9<- AA
5145 mov r3, r0, lsr #8 @ r3<- CC
5146 and r2, r0, #255 @ r2<- BB
5147 GET_VREG(r1, r3) @ r1<- vCC
5148 GET_VREG(r0, r2) @ r0<- vBB
5149 .if 0
5150 cmp r1, #0 @ is second operand zero?
5151 beq common_errDivideByZero
5152 .endif
5153
5154 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
5155 @ optional op; may set condition codes
5156 bl __aeabi_fadd @ r0<- op, r0-r3 changed
5157 GET_INST_OPCODE(ip) @ extract opcode from rINST
5158 SET_VREG(r0, r9) @ vAA<- r0
5159 GOTO_OPCODE(ip) @ jump to next instruction
5160 /* 11-14 instructions */
5161
5162
5163
5164/* ------------------------------ */
5165 .balign 64
5166.L_OP_SUB_FLOAT: /* 0xa7 */
5167/* File: armv5te/OP_SUB_FLOAT.S */
5168/* File: armv5te/binop.S */
5169 /*
5170 * Generic 32-bit binary operation. Provide an "instr" line that
5171 * specifies an instruction that performs "result = r0 op r1".
5172 * This could be an ARM instruction or a function call. (If the result
5173 * comes back in a register other than r0, you can override "result".)
5174 *
5175 * If "chkzero" is set to 1, we perform a divide-by-zero check on
5176 * vCC (r1). Useful for integer division and modulus. Note that we
5177 * *don't* check for (INT_MIN / -1) here, because the ARM math lib
5178 * handles it correctly.
5179 *
5180 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
5181 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
5182 * mul-float, div-float, rem-float
5183 */
5184 /* binop vAA, vBB, vCC */
5185 FETCH(r0, 1) @ r0<- CCBB
5186 mov r9, rINST, lsr #8 @ r9<- AA
5187 mov r3, r0, lsr #8 @ r3<- CC
5188 and r2, r0, #255 @ r2<- BB
5189 GET_VREG(r1, r3) @ r1<- vCC
5190 GET_VREG(r0, r2) @ r0<- vBB
5191 .if 0
5192 cmp r1, #0 @ is second operand zero?
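    @ (Illustrative aside, not generated code: the float handlers operate on
    @ raw 32-bit vreg values and call the EABI soft-float helpers
    @ __aeabi_fadd/fsub/fmul/fdiv; conceptually, for sub-float, in C:
    @     float result = vBB_val - vCC_val;
    @ where vBB_val/vCC_val are hypothetical float views of r0/r1.)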
5193 beq common_errDivideByZero 5194 .endif 5195 5196 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5197 @ optional op; may set condition codes 5198 bl __aeabi_fsub @ r0<- op, r0-r3 changed 5199 GET_INST_OPCODE(ip) @ extract opcode from rINST 5200 SET_VREG(r0, r9) @ vAA<- r0 5201 GOTO_OPCODE(ip) @ jump to next instruction 5202 /* 11-14 instructions */ 5203 5204 5205 5206/* ------------------------------ */ 5207 .balign 64 5208.L_OP_MUL_FLOAT: /* 0xa8 */ 5209/* File: armv5te/OP_MUL_FLOAT.S */ 5210/* File: armv5te/binop.S */ 5211 /* 5212 * Generic 32-bit binary operation. Provide an "instr" line that 5213 * specifies an instruction that performs "result = r0 op r1". 5214 * This could be an ARM instruction or a function call. (If the result 5215 * comes back in a register other than r0, you can override "result".) 5216 * 5217 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5218 * vCC (r1). Useful for integer division and modulus. Note that we 5219 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5220 * handles it correctly. 5221 * 5222 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5223 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5224 * mul-float, div-float, rem-float 5225 */ 5226 /* binop vAA, vBB, vCC */ 5227 FETCH(r0, 1) @ r0<- CCBB 5228 mov r9, rINST, lsr #8 @ r9<- AA 5229 mov r3, r0, lsr #8 @ r3<- CC 5230 and r2, r0, #255 @ r2<- BB 5231 GET_VREG(r1, r3) @ r1<- vCC 5232 GET_VREG(r0, r2) @ r0<- vBB 5233 .if 0 5234 cmp r1, #0 @ is second operand zero? 5235 beq common_errDivideByZero 5236 .endif 5237 5238 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5239 @ optional op; may set condition codes 5240 bl __aeabi_fmul @ r0<- op, r0-r3 changed 5241 GET_INST_OPCODE(ip) @ extract opcode from rINST 5242 SET_VREG(r0, r9) @ vAA<- r0 5243 GOTO_OPCODE(ip) @ jump to next instruction 5244 /* 11-14 instructions */ 5245 5246 5247 5248/* ------------------------------ */ 5249 .balign 64 5250.L_OP_DIV_FLOAT: /* 0xa9 */ 5251/* File: armv5te/OP_DIV_FLOAT.S */ 5252/* File: armv5te/binop.S */ 5253 /* 5254 * Generic 32-bit binary operation. Provide an "instr" line that 5255 * specifies an instruction that performs "result = r0 op r1". 5256 * This could be an ARM instruction or a function call. (If the result 5257 * comes back in a register other than r0, you can override "result".) 5258 * 5259 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5260 * vCC (r1). Useful for integer division and modulus. Note that we 5261 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5262 * handles it correctly. 5263 * 5264 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5265 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5266 * mul-float, div-float, rem-float 5267 */ 5268 /* binop vAA, vBB, vCC */ 5269 FETCH(r0, 1) @ r0<- CCBB 5270 mov r9, rINST, lsr #8 @ r9<- AA 5271 mov r3, r0, lsr #8 @ r3<- CC 5272 and r2, r0, #255 @ r2<- BB 5273 GET_VREG(r1, r3) @ r1<- vCC 5274 GET_VREG(r0, r2) @ r0<- vBB 5275 .if 0 5276 cmp r1, #0 @ is second operand zero? 
5277 beq common_errDivideByZero 5278 .endif 5279 5280 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5281 @ optional op; may set condition codes 5282 bl __aeabi_fdiv @ r0<- op, r0-r3 changed 5283 GET_INST_OPCODE(ip) @ extract opcode from rINST 5284 SET_VREG(r0, r9) @ vAA<- r0 5285 GOTO_OPCODE(ip) @ jump to next instruction 5286 /* 11-14 instructions */ 5287 5288 5289 5290/* ------------------------------ */ 5291 .balign 64 5292.L_OP_REM_FLOAT: /* 0xaa */ 5293/* File: armv5te/OP_REM_FLOAT.S */ 5294/* EABI doesn't define a float remainder function, but libm does */ 5295/* File: armv5te/binop.S */ 5296 /* 5297 * Generic 32-bit binary operation. Provide an "instr" line that 5298 * specifies an instruction that performs "result = r0 op r1". 5299 * This could be an ARM instruction or a function call. (If the result 5300 * comes back in a register other than r0, you can override "result".) 5301 * 5302 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5303 * vCC (r1). Useful for integer division and modulus. Note that we 5304 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5305 * handles it correctly. 5306 * 5307 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5308 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5309 * mul-float, div-float, rem-float 5310 */ 5311 /* binop vAA, vBB, vCC */ 5312 FETCH(r0, 1) @ r0<- CCBB 5313 mov r9, rINST, lsr #8 @ r9<- AA 5314 mov r3, r0, lsr #8 @ r3<- CC 5315 and r2, r0, #255 @ r2<- BB 5316 GET_VREG(r1, r3) @ r1<- vCC 5317 GET_VREG(r0, r2) @ r0<- vBB 5318 .if 0 5319 cmp r1, #0 @ is second operand zero? 5320 beq common_errDivideByZero 5321 .endif 5322 5323 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5324 @ optional op; may set condition codes 5325 bl fmodf @ r0<- op, r0-r3 changed 5326 GET_INST_OPCODE(ip) @ extract opcode from rINST 5327 SET_VREG(r0, r9) @ vAA<- r0 5328 GOTO_OPCODE(ip) @ jump to next instruction 5329 /* 11-14 instructions */ 5330 5331 5332 5333/* ------------------------------ */ 5334 .balign 64 5335.L_OP_ADD_DOUBLE: /* 0xab */ 5336/* File: armv5te/OP_ADD_DOUBLE.S */ 5337/* File: armv5te/binopWide.S */ 5338 /* 5339 * Generic 64-bit binary operation. Provide an "instr" line that 5340 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5341 * This could be an ARM instruction or a function call. (If the result 5342 * comes back in a register other than r0, you can override "result".) 5343 * 5344 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5345 * vCC (r1). Useful for integer division and modulus. 5346 * 5347 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5348 * xor-long, add-double, sub-double, mul-double, div-double, 5349 * rem-double 5350 * 5351 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5352 */ 5353 /* binop vAA, vBB, vCC */ 5354 FETCH(r0, 1) @ r0<- CCBB 5355 mov r9, rINST, lsr #8 @ r9<- AA 5356 and r2, r0, #255 @ r2<- BB 5357 mov r3, r0, lsr #8 @ r3<- CC 5358 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5359 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5360 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5361 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5362 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5363 .if 0 5364 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5365 beq common_errDivideByZero 5366 .endif 5367 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5368 5369 @ optional op; may set condition codes 5370 bl __aeabi_dadd @ result<- op, r0-r3 changed 5371 GET_INST_OPCODE(ip) @ extract opcode from rINST 5372 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5373 GOTO_OPCODE(ip) @ jump to next instruction 5374 /* 14-17 instructions */ 5375 5376 5377 5378/* ------------------------------ */ 5379 .balign 64 5380.L_OP_SUB_DOUBLE: /* 0xac */ 5381/* File: armv5te/OP_SUB_DOUBLE.S */ 5382/* File: armv5te/binopWide.S */ 5383 /* 5384 * Generic 64-bit binary operation. Provide an "instr" line that 5385 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5386 * This could be an ARM instruction or a function call. (If the result 5387 * comes back in a register other than r0, you can override "result".) 5388 * 5389 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5390 * vCC (r1). Useful for integer division and modulus. 5391 * 5392 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5393 * xor-long, add-double, sub-double, mul-double, div-double, 5394 * rem-double 5395 * 5396 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5397 */ 5398 /* binop vAA, vBB, vCC */ 5399 FETCH(r0, 1) @ r0<- CCBB 5400 mov r9, rINST, lsr #8 @ r9<- AA 5401 and r2, r0, #255 @ r2<- BB 5402 mov r3, r0, lsr #8 @ r3<- CC 5403 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5404 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5405 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5406 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5407 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5408 .if 0 5409 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5410 beq common_errDivideByZero 5411 .endif 5412 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5413 5414 @ optional op; may set condition codes 5415 bl __aeabi_dsub @ result<- op, r0-r3 changed 5416 GET_INST_OPCODE(ip) @ extract opcode from rINST 5417 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5418 GOTO_OPCODE(ip) @ jump to next instruction 5419 /* 14-17 instructions */ 5420 5421 5422 5423/* ------------------------------ */ 5424 .balign 64 5425.L_OP_MUL_DOUBLE: /* 0xad */ 5426/* File: armv5te/OP_MUL_DOUBLE.S */ 5427/* File: armv5te/binopWide.S */ 5428 /* 5429 * Generic 64-bit binary operation. Provide an "instr" line that 5430 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5431 * This could be an ARM instruction or a function call. (If the result 5432 * comes back in a register other than r0, you can override "result".) 5433 * 5434 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5435 * vCC (r1). Useful for integer division and modulus. 5436 * 5437 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5438 * xor-long, add-double, sub-double, mul-double, div-double, 5439 * rem-double 5440 * 5441 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5442 */ 5443 /* binop vAA, vBB, vCC */ 5444 FETCH(r0, 1) @ r0<- CCBB 5445 mov r9, rINST, lsr #8 @ r9<- AA 5446 and r2, r0, #255 @ r2<- BB 5447 mov r3, r0, lsr #8 @ r3<- CC 5448 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5449 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5450 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5451 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5452 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5453 .if 0 5454 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5455 beq common_errDivideByZero 5456 .endif 5457 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5458 5459 @ optional op; may set condition codes 5460 bl __aeabi_dmul @ result<- op, r0-r3 changed 5461 GET_INST_OPCODE(ip) @ extract opcode from rINST 5462 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5463 GOTO_OPCODE(ip) @ jump to next instruction 5464 /* 14-17 instructions */ 5465 5466 5467 5468/* ------------------------------ */ 5469 .balign 64 5470.L_OP_DIV_DOUBLE: /* 0xae */ 5471/* File: armv5te/OP_DIV_DOUBLE.S */ 5472/* File: armv5te/binopWide.S */ 5473 /* 5474 * Generic 64-bit binary operation. Provide an "instr" line that 5475 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5476 * This could be an ARM instruction or a function call. (If the result 5477 * comes back in a register other than r0, you can override "result".) 5478 * 5479 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5480 * vCC (r1). Useful for integer division and modulus. 5481 * 5482 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5483 * xor-long, add-double, sub-double, mul-double, div-double, 5484 * rem-double 5485 * 5486 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5487 */ 5488 /* binop vAA, vBB, vCC */ 5489 FETCH(r0, 1) @ r0<- CCBB 5490 mov r9, rINST, lsr #8 @ r9<- AA 5491 and r2, r0, #255 @ r2<- BB 5492 mov r3, r0, lsr #8 @ r3<- CC 5493 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5494 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5495 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5496 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5497 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5498 .if 0 5499 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5500 beq common_errDivideByZero 5501 .endif 5502 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5503 5504 @ optional op; may set condition codes 5505 bl __aeabi_ddiv @ result<- op, r0-r3 changed 5506 GET_INST_OPCODE(ip) @ extract opcode from rINST 5507 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5508 GOTO_OPCODE(ip) @ jump to next instruction 5509 /* 14-17 instructions */ 5510 5511 5512 5513/* ------------------------------ */ 5514 .balign 64 5515.L_OP_REM_DOUBLE: /* 0xaf */ 5516/* File: armv5te/OP_REM_DOUBLE.S */ 5517/* EABI doesn't define a double remainder function, but libm does */ 5518/* File: armv5te/binopWide.S */ 5519 /* 5520 * Generic 64-bit binary operation. Provide an "instr" line that 5521 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5522 * This could be an ARM instruction or a function call. (If the result 5523 * comes back in a register other than r0, you can override "result".) 5524 * 5525 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5526 * vCC (r1). Useful for integer division and modulus. 5527 * 5528 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5529 * xor-long, add-double, sub-double, mul-double, div-double, 5530 * rem-double 5531 * 5532 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5533 */ 5534 /* binop vAA, vBB, vCC */ 5535 FETCH(r0, 1) @ r0<- CCBB 5536 mov r9, rINST, lsr #8 @ r9<- AA 5537 and r2, r0, #255 @ r2<- BB 5538 mov r3, r0, lsr #8 @ r3<- CC 5539 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5540 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5541 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5542 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5543 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5544 .if 0 5545 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
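    @ (Illustrative aside, not generated code: as the header comment above
    @ notes, the EABI defines no remainder helper, so rem-double calls
    @ libm's fmod (and rem-float calls fmodf); roughly, in C:
    @     double result = fmod(vBB_val, vCC_val);   /* <math.h> */
    @ where vBB_val/vCC_val are hypothetical double views of r0/r1, r2/r3.)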
5546 beq common_errDivideByZero 5547 .endif 5548 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5549 5550 @ optional op; may set condition codes 5551 bl fmod @ result<- op, r0-r3 changed 5552 GET_INST_OPCODE(ip) @ extract opcode from rINST 5553 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5554 GOTO_OPCODE(ip) @ jump to next instruction 5555 /* 14-17 instructions */ 5556 5557 5558 5559/* ------------------------------ */ 5560 .balign 64 5561.L_OP_ADD_INT_2ADDR: /* 0xb0 */ 5562/* File: armv5te/OP_ADD_INT_2ADDR.S */ 5563/* File: armv5te/binop2addr.S */ 5564 /* 5565 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5566 * that specifies an instruction that performs "result = r0 op r1". 5567 * This could be an ARM instruction or a function call. (If the result 5568 * comes back in a register other than r0, you can override "result".) 5569 * 5570 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5571 * vCC (r1). Useful for integer division and modulus. 5572 * 5573 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5574 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5575 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5576 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5577 */ 5578 /* binop/2addr vA, vB */ 5579 mov r9, rINST, lsr #8 @ r9<- A+ 5580 mov r3, rINST, lsr #12 @ r3<- B 5581 and r9, r9, #15 5582 GET_VREG(r0, r9) @ r0<- vA 5583 GET_VREG(r1, r3) @ r1<- vB 5584 .if 0 5585 cmp r1, #0 @ is second operand zero? 5586 beq common_errDivideByZero 5587 .endif 5588 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5589 5590 @ optional op; may set condition codes 5591 add r0, r0, r1 @ r0<- op, r0-r3 changed 5592 GET_INST_OPCODE(ip) @ extract opcode from rINST 5593 SET_VREG(r0, r9) @ vAA<- r0 5594 GOTO_OPCODE(ip) @ jump to next instruction 5595 /* 10-13 instructions */ 5596 5597 5598 5599/* ------------------------------ */ 5600 .balign 64 5601.L_OP_SUB_INT_2ADDR: /* 0xb1 */ 5602/* File: armv5te/OP_SUB_INT_2ADDR.S */ 5603/* File: armv5te/binop2addr.S */ 5604 /* 5605 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5606 * that specifies an instruction that performs "result = r0 op r1". 5607 * This could be an ARM instruction or a function call. (If the result 5608 * comes back in a register other than r0, you can override "result".) 5609 * 5610 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5611 * vCC (r1). Useful for integer division and modulus. 5612 * 5613 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5614 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5615 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5616 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5617 */ 5618 /* binop/2addr vA, vB */ 5619 mov r9, rINST, lsr #8 @ r9<- A+ 5620 mov r3, rINST, lsr #12 @ r3<- B 5621 and r9, r9, #15 5622 GET_VREG(r0, r9) @ r0<- vA 5623 GET_VREG(r1, r3) @ r1<- vB 5624 .if 0 5625 cmp r1, #0 @ is second operand zero? 
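    @ (Illustrative aside, not generated code: the /2addr decode above takes
    @ both register numbers from rINST -- A from bits 8-11, B from bits
    @ 12-15; roughly, in C:
    @     unsigned a = (inst >> 8) & 0xf;
    @     unsigned b = inst >> 12;
    @     vreg[a] = vreg[a] - vreg[b];        /* sub-int/2addr */
    @ where inst and vreg[] are hypothetical names for rINST and the fp[]
    @ register frame.)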
5626 beq common_errDivideByZero 5627 .endif 5628 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5629 5630 @ optional op; may set condition codes 5631 sub r0, r0, r1 @ r0<- op, r0-r3 changed 5632 GET_INST_OPCODE(ip) @ extract opcode from rINST 5633 SET_VREG(r0, r9) @ vAA<- r0 5634 GOTO_OPCODE(ip) @ jump to next instruction 5635 /* 10-13 instructions */ 5636 5637 5638 5639/* ------------------------------ */ 5640 .balign 64 5641.L_OP_MUL_INT_2ADDR: /* 0xb2 */ 5642/* File: armv5te/OP_MUL_INT_2ADDR.S */ 5643/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 5644/* File: armv5te/binop2addr.S */ 5645 /* 5646 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5647 * that specifies an instruction that performs "result = r0 op r1". 5648 * This could be an ARM instruction or a function call. (If the result 5649 * comes back in a register other than r0, you can override "result".) 5650 * 5651 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5652 * vCC (r1). Useful for integer division and modulus. 5653 * 5654 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5655 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5656 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5657 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5658 */ 5659 /* binop/2addr vA, vB */ 5660 mov r9, rINST, lsr #8 @ r9<- A+ 5661 mov r3, rINST, lsr #12 @ r3<- B 5662 and r9, r9, #15 5663 GET_VREG(r0, r9) @ r0<- vA 5664 GET_VREG(r1, r3) @ r1<- vB 5665 .if 0 5666 cmp r1, #0 @ is second operand zero? 5667 beq common_errDivideByZero 5668 .endif 5669 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5670 5671 @ optional op; may set condition codes 5672 mul r0, r1, r0 @ r0<- op, r0-r3 changed 5673 GET_INST_OPCODE(ip) @ extract opcode from rINST 5674 SET_VREG(r0, r9) @ vAA<- r0 5675 GOTO_OPCODE(ip) @ jump to next instruction 5676 /* 10-13 instructions */ 5677 5678 5679 5680/* ------------------------------ */ 5681 .balign 64 5682.L_OP_DIV_INT_2ADDR: /* 0xb3 */ 5683/* File: armv5te/OP_DIV_INT_2ADDR.S */ 5684/* File: armv5te/binop2addr.S */ 5685 /* 5686 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5687 * that specifies an instruction that performs "result = r0 op r1". 5688 * This could be an ARM instruction or a function call. (If the result 5689 * comes back in a register other than r0, you can override "result".) 5690 * 5691 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5692 * vCC (r1). Useful for integer division and modulus. 5693 * 5694 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5695 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5696 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5697 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5698 */ 5699 /* binop/2addr vA, vB */ 5700 mov r9, rINST, lsr #8 @ r9<- A+ 5701 mov r3, rINST, lsr #12 @ r3<- B 5702 and r9, r9, #15 5703 GET_VREG(r0, r9) @ r0<- vA 5704 GET_VREG(r1, r3) @ r1<- vB 5705 .if 1 5706 cmp r1, #0 @ is second operand zero? 
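    @ (Illustrative aside, not generated code: this is the live chkzero=1
    @ path, kept by the ".if 1"; in C terms:
    @     if (vB == 0) goto common_errDivideByZero;
    @     vA = vA / vB;                       /* via __aeabi_idiv */
    @ As the binop comments note, INT_MIN / -1 needs no special case here
    @ because the ARM math lib handles it.)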
5707 beq common_errDivideByZero 5708 .endif 5709 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5710 5711 @ optional op; may set condition codes 5712 bl __aeabi_idiv @ r0<- op, r0-r3 changed 5713 GET_INST_OPCODE(ip) @ extract opcode from rINST 5714 SET_VREG(r0, r9) @ vAA<- r0 5715 GOTO_OPCODE(ip) @ jump to next instruction 5716 /* 10-13 instructions */ 5717 5718 5719 5720/* ------------------------------ */ 5721 .balign 64 5722.L_OP_REM_INT_2ADDR: /* 0xb4 */ 5723/* File: armv5te/OP_REM_INT_2ADDR.S */ 5724/* idivmod returns quotient in r0 and remainder in r1 */ 5725/* File: armv5te/binop2addr.S */ 5726 /* 5727 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5728 * that specifies an instruction that performs "result = r0 op r1". 5729 * This could be an ARM instruction or a function call. (If the result 5730 * comes back in a register other than r0, you can override "result".) 5731 * 5732 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5733 * vCC (r1). Useful for integer division and modulus. 5734 * 5735 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5736 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5737 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5738 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5739 */ 5740 /* binop/2addr vA, vB */ 5741 mov r9, rINST, lsr #8 @ r9<- A+ 5742 mov r3, rINST, lsr #12 @ r3<- B 5743 and r9, r9, #15 5744 GET_VREG(r0, r9) @ r0<- vA 5745 GET_VREG(r1, r3) @ r1<- vB 5746 .if 1 5747 cmp r1, #0 @ is second operand zero? 5748 beq common_errDivideByZero 5749 .endif 5750 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5751 5752 @ optional op; may set condition codes 5753 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 5754 GET_INST_OPCODE(ip) @ extract opcode from rINST 5755 SET_VREG(r1, r9) @ vAA<- r1 5756 GOTO_OPCODE(ip) @ jump to next instruction 5757 /* 10-13 instructions */ 5758 5759 5760 5761/* ------------------------------ */ 5762 .balign 64 5763.L_OP_AND_INT_2ADDR: /* 0xb5 */ 5764/* File: armv5te/OP_AND_INT_2ADDR.S */ 5765/* File: armv5te/binop2addr.S */ 5766 /* 5767 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5768 * that specifies an instruction that performs "result = r0 op r1". 5769 * This could be an ARM instruction or a function call. (If the result 5770 * comes back in a register other than r0, you can override "result".) 5771 * 5772 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5773 * vCC (r1). Useful for integer division and modulus. 5774 * 5775 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5776 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5777 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5778 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5779 */ 5780 /* binop/2addr vA, vB */ 5781 mov r9, rINST, lsr #8 @ r9<- A+ 5782 mov r3, rINST, lsr #12 @ r3<- B 5783 and r9, r9, #15 5784 GET_VREG(r0, r9) @ r0<- vA 5785 GET_VREG(r1, r3) @ r1<- vB 5786 .if 0 5787 cmp r1, #0 @ is second operand zero? 
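    @ (Illustrative aside, not generated code: in rem-int/2addr above,
    @ __aeabi_idivmod returns the quotient in r0 and the remainder in r1,
    @ so that handler stores r1; roughly, in C:
    @     div_t d = div(vA, vB);              /* <stdlib.h> */
    @     vA = d.rem;
    @ with vA/vB as hypothetical operand values.)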
5788 beq common_errDivideByZero 5789 .endif 5790 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5791 5792 @ optional op; may set condition codes 5793 and r0, r0, r1 @ r0<- op, r0-r3 changed 5794 GET_INST_OPCODE(ip) @ extract opcode from rINST 5795 SET_VREG(r0, r9) @ vAA<- r0 5796 GOTO_OPCODE(ip) @ jump to next instruction 5797 /* 10-13 instructions */ 5798 5799 5800 5801/* ------------------------------ */ 5802 .balign 64 5803.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5804/* File: armv5te/OP_OR_INT_2ADDR.S */ 5805/* File: armv5te/binop2addr.S */ 5806 /* 5807 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5808 * that specifies an instruction that performs "result = r0 op r1". 5809 * This could be an ARM instruction or a function call. (If the result 5810 * comes back in a register other than r0, you can override "result".) 5811 * 5812 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5813 * vCC (r1). Useful for integer division and modulus. 5814 * 5815 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5816 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5817 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5818 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5819 */ 5820 /* binop/2addr vA, vB */ 5821 mov r9, rINST, lsr #8 @ r9<- A+ 5822 mov r3, rINST, lsr #12 @ r3<- B 5823 and r9, r9, #15 5824 GET_VREG(r0, r9) @ r0<- vA 5825 GET_VREG(r1, r3) @ r1<- vB 5826 .if 0 5827 cmp r1, #0 @ is second operand zero? 5828 beq common_errDivideByZero 5829 .endif 5830 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5831 5832 @ optional op; may set condition codes 5833 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5834 GET_INST_OPCODE(ip) @ extract opcode from rINST 5835 SET_VREG(r0, r9) @ vAA<- r0 5836 GOTO_OPCODE(ip) @ jump to next instruction 5837 /* 10-13 instructions */ 5838 5839 5840 5841/* ------------------------------ */ 5842 .balign 64 5843.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5844/* File: armv5te/OP_XOR_INT_2ADDR.S */ 5845/* File: armv5te/binop2addr.S */ 5846 /* 5847 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5848 * that specifies an instruction that performs "result = r0 op r1". 5849 * This could be an ARM instruction or a function call. (If the result 5850 * comes back in a register other than r0, you can override "result".) 5851 * 5852 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5853 * vCC (r1). Useful for integer division and modulus. 5854 * 5855 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5856 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5857 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5858 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5859 */ 5860 /* binop/2addr vA, vB */ 5861 mov r9, rINST, lsr #8 @ r9<- A+ 5862 mov r3, rINST, lsr #12 @ r3<- B 5863 and r9, r9, #15 5864 GET_VREG(r0, r9) @ r0<- vA 5865 GET_VREG(r1, r3) @ r1<- vB 5866 .if 0 5867 cmp r1, #0 @ is second operand zero? 
5868 beq common_errDivideByZero 5869 .endif 5870 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5871 5872 @ optional op; may set condition codes 5873 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5874 GET_INST_OPCODE(ip) @ extract opcode from rINST 5875 SET_VREG(r0, r9) @ vAA<- r0 5876 GOTO_OPCODE(ip) @ jump to next instruction 5877 /* 10-13 instructions */ 5878 5879 5880 5881/* ------------------------------ */ 5882 .balign 64 5883.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5884/* File: armv5te/OP_SHL_INT_2ADDR.S */ 5885/* File: armv5te/binop2addr.S */ 5886 /* 5887 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5888 * that specifies an instruction that performs "result = r0 op r1". 5889 * This could be an ARM instruction or a function call. (If the result 5890 * comes back in a register other than r0, you can override "result".) 5891 * 5892 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5893 * vCC (r1). Useful for integer division and modulus. 5894 * 5895 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5896 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5897 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5898 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5899 */ 5900 /* binop/2addr vA, vB */ 5901 mov r9, rINST, lsr #8 @ r9<- A+ 5902 mov r3, rINST, lsr #12 @ r3<- B 5903 and r9, r9, #15 5904 GET_VREG(r0, r9) @ r0<- vA 5905 GET_VREG(r1, r3) @ r1<- vB 5906 .if 0 5907 cmp r1, #0 @ is second operand zero? 5908 beq common_errDivideByZero 5909 .endif 5910 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5911 5912 and r1, r1, #31 @ optional op; may set condition codes 5913 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5914 GET_INST_OPCODE(ip) @ extract opcode from rINST 5915 SET_VREG(r0, r9) @ vAA<- r0 5916 GOTO_OPCODE(ip) @ jump to next instruction 5917 /* 10-13 instructions */ 5918 5919 5920 5921/* ------------------------------ */ 5922 .balign 64 5923.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5924/* File: armv5te/OP_SHR_INT_2ADDR.S */ 5925/* File: armv5te/binop2addr.S */ 5926 /* 5927 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5928 * that specifies an instruction that performs "result = r0 op r1". 5929 * This could be an ARM instruction or a function call. (If the result 5930 * comes back in a register other than r0, you can override "result".) 5931 * 5932 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5933 * vCC (r1). Useful for integer division and modulus. 5934 * 5935 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5936 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5937 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5938 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5939 */ 5940 /* binop/2addr vA, vB */ 5941 mov r9, rINST, lsr #8 @ r9<- A+ 5942 mov r3, rINST, lsr #12 @ r3<- B 5943 and r9, r9, #15 5944 GET_VREG(r0, r9) @ r0<- vA 5945 GET_VREG(r1, r3) @ r1<- vB 5946 .if 0 5947 cmp r1, #0 @ is second operand zero? 
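    @ (Illustrative aside, not generated code: the 32-bit shift handlers mask
    @ the count with "and r1, r1, #31", so only the low five bits of vB are
    @ used; roughly, in C:
    @     vA = vA >> (vB & 31);               /* asr for shr-int/2addr */
    @ with vA/vB as hypothetical operand values.)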
5948 beq common_errDivideByZero 5949 .endif 5950 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5951 5952 and r1, r1, #31 @ optional op; may set condition codes 5953 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5954 GET_INST_OPCODE(ip) @ extract opcode from rINST 5955 SET_VREG(r0, r9) @ vAA<- r0 5956 GOTO_OPCODE(ip) @ jump to next instruction 5957 /* 10-13 instructions */ 5958 5959 5960 5961/* ------------------------------ */ 5962 .balign 64 5963.L_OP_USHR_INT_2ADDR: /* 0xba */ 5964/* File: armv5te/OP_USHR_INT_2ADDR.S */ 5965/* File: armv5te/binop2addr.S */ 5966 /* 5967 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5968 * that specifies an instruction that performs "result = r0 op r1". 5969 * This could be an ARM instruction or a function call. (If the result 5970 * comes back in a register other than r0, you can override "result".) 5971 * 5972 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5973 * vCC (r1). Useful for integer division and modulus. 5974 * 5975 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5976 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5977 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5978 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5979 */ 5980 /* binop/2addr vA, vB */ 5981 mov r9, rINST, lsr #8 @ r9<- A+ 5982 mov r3, rINST, lsr #12 @ r3<- B 5983 and r9, r9, #15 5984 GET_VREG(r0, r9) @ r0<- vA 5985 GET_VREG(r1, r3) @ r1<- vB 5986 .if 0 5987 cmp r1, #0 @ is second operand zero? 5988 beq common_errDivideByZero 5989 .endif 5990 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5991 5992 and r1, r1, #31 @ optional op; may set condition codes 5993 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5994 GET_INST_OPCODE(ip) @ extract opcode from rINST 5995 SET_VREG(r0, r9) @ vAA<- r0 5996 GOTO_OPCODE(ip) @ jump to next instruction 5997 /* 10-13 instructions */ 5998 5999 6000 6001/* ------------------------------ */ 6002 .balign 64 6003.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 6004/* File: armv5te/OP_ADD_LONG_2ADDR.S */ 6005/* File: armv5te/binopWide2addr.S */ 6006 /* 6007 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6008 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6009 * This could be an ARM instruction or a function call. (If the result 6010 * comes back in a register other than r0, you can override "result".) 6011 * 6012 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6013 * vCC (r1). Useful for integer division and modulus. 6014 * 6015 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6016 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6017 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6018 * rem-double/2addr 6019 */ 6020 /* binop/2addr vA, vB */ 6021 mov r9, rINST, lsr #8 @ r9<- A+ 6022 mov r1, rINST, lsr #12 @ r1<- B 6023 and r9, r9, #15 6024 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6025 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6026 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6027 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6028 .if 0 6029 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6030 beq common_errDivideByZero 6031 .endif 6032 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6033 6034 adds r0, r0, r2 @ optional op; may set condition codes 6035 adc r1, r1, r3 @ result<- op, r0-r3 changed 6036 GET_INST_OPCODE(ip) @ extract opcode from rINST 6037 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6038 GOTO_OPCODE(ip) @ jump to next instruction 6039 /* 12-15 instructions */ 6040 6041 6042 6043/* ------------------------------ */ 6044 .balign 64 6045.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 6046/* File: armv5te/OP_SUB_LONG_2ADDR.S */ 6047/* File: armv5te/binopWide2addr.S */ 6048 /* 6049 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6050 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6051 * This could be an ARM instruction or a function call. (If the result 6052 * comes back in a register other than r0, you can override "result".) 6053 * 6054 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6055 * vCC (r1). Useful for integer division and modulus. 6056 * 6057 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6058 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6059 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6060 * rem-double/2addr 6061 */ 6062 /* binop/2addr vA, vB */ 6063 mov r9, rINST, lsr #8 @ r9<- A+ 6064 mov r1, rINST, lsr #12 @ r1<- B 6065 and r9, r9, #15 6066 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6067 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6068 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6069 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6070 .if 0 6071 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6072 beq common_errDivideByZero 6073 .endif 6074 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6075 6076 subs r0, r0, r2 @ optional op; may set condition codes 6077 sbc r1, r1, r3 @ result<- op, r0-r3 changed 6078 GET_INST_OPCODE(ip) @ extract opcode from rINST 6079 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6080 GOTO_OPCODE(ip) @ jump to next instruction 6081 /* 12-15 instructions */ 6082 6083 6084 6085/* ------------------------------ */ 6086 .balign 64 6087.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 6088/* File: armv5te/OP_MUL_LONG_2ADDR.S */ 6089 /* 6090 * Signed 64-bit integer multiply, "/2addr" version. 6091 * 6092 * See OP_MUL_LONG for an explanation. 6093 * 6094 * We get a little tight on registers, so to avoid looking up &fp[A] 6095 * again we stuff it into rINST. 6096 */ 6097 /* mul-long/2addr vA, vB */ 6098 mov r9, rINST, lsr #8 @ r9<- A+ 6099 mov r1, rINST, lsr #12 @ r1<- B 6100 and r9, r9, #15 6101 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6102 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 6103 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6104 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 6105 mul ip, r2, r1 @ ip<- ZxW 6106 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 6107 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 6108 mov r0, rINST @ r0<- &fp[A] (free up rINST) 6109 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6110 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 6111 GET_INST_OPCODE(ip) @ extract opcode from rINST 6112 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 6113 GOTO_OPCODE(ip) @ jump to next instruction 6114 6115 6116/* ------------------------------ */ 6117 .balign 64 6118.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 6119/* File: armv5te/OP_DIV_LONG_2ADDR.S */ 6120/* File: armv5te/binopWide2addr.S */ 6121 /* 6122 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6123 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6124 * This could be an ARM instruction or a function call. 
(If the result 6125 * comes back in a register other than r0, you can override "result".) 6126 * 6127 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6128 * vCC (r1). Useful for integer division and modulus. 6129 * 6130 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6131 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6132 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6133 * rem-double/2addr 6134 */ 6135 /* binop/2addr vA, vB */ 6136 mov r9, rINST, lsr #8 @ r9<- A+ 6137 mov r1, rINST, lsr #12 @ r1<- B 6138 and r9, r9, #15 6139 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6140 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6141 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6142 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6143 .if 1 6144 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6145 beq common_errDivideByZero 6146 .endif 6147 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6148 6149 @ optional op; may set condition codes 6150 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 6151 GET_INST_OPCODE(ip) @ extract opcode from rINST 6152 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6153 GOTO_OPCODE(ip) @ jump to next instruction 6154 /* 12-15 instructions */ 6155 6156 6157 6158/* ------------------------------ */ 6159 .balign 64 6160.L_OP_REM_LONG_2ADDR: /* 0xbf */ 6161/* File: armv5te/OP_REM_LONG_2ADDR.S */ 6162/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 6163/* File: armv5te/binopWide2addr.S */ 6164 /* 6165 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6166 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6167 * This could be an ARM instruction or a function call. (If the result 6168 * comes back in a register other than r0, you can override "result".) 6169 * 6170 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6171 * vCC (r1). Useful for integer division and modulus. 6172 * 6173 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6174 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6175 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6176 * rem-double/2addr 6177 */ 6178 /* binop/2addr vA, vB */ 6179 mov r9, rINST, lsr #8 @ r9<- A+ 6180 mov r1, rINST, lsr #12 @ r1<- B 6181 and r9, r9, #15 6182 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6183 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6184 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6185 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6186 .if 1 6187 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6188 beq common_errDivideByZero 6189 .endif 6190 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6191 6192 @ optional op; may set condition codes 6193 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 6194 GET_INST_OPCODE(ip) @ extract opcode from rINST 6195 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 6196 GOTO_OPCODE(ip) @ jump to next instruction 6197 /* 12-15 instructions */ 6198 6199 6200 6201/* ------------------------------ */ 6202 .balign 64 6203.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 6204/* File: armv5te/OP_AND_LONG_2ADDR.S */ 6205/* File: armv5te/binopWide2addr.S */ 6206 /* 6207 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6208 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6209 * This could be an ARM instruction or a function call. (If the result 6210 * comes back in a register other than r0, you can override "result".) 6211 * 6212 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6213 * vCC (r1). Useful for integer division and modulus. 
6214 * 6215 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6216 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6217 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6218 * rem-double/2addr 6219 */ 6220 /* binop/2addr vA, vB */ 6221 mov r9, rINST, lsr #8 @ r9<- A+ 6222 mov r1, rINST, lsr #12 @ r1<- B 6223 and r9, r9, #15 6224 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6225 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6226 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6227 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6228 .if 0 6229 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6230 beq common_errDivideByZero 6231 .endif 6232 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6233 6234 and r0, r0, r2 @ optional op; may set condition codes 6235 and r1, r1, r3 @ result<- op, r0-r3 changed 6236 GET_INST_OPCODE(ip) @ extract opcode from rINST 6237 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6238 GOTO_OPCODE(ip) @ jump to next instruction 6239 /* 12-15 instructions */ 6240 6241 6242 6243/* ------------------------------ */ 6244 .balign 64 6245.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 6246/* File: armv5te/OP_OR_LONG_2ADDR.S */ 6247/* File: armv5te/binopWide2addr.S */ 6248 /* 6249 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6250 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6251 * This could be an ARM instruction or a function call. (If the result 6252 * comes back in a register other than r0, you can override "result".) 6253 * 6254 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6255 * vCC (r1). Useful for integer division and modulus. 6256 * 6257 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6258 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6259 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6260 * rem-double/2addr 6261 */ 6262 /* binop/2addr vA, vB */ 6263 mov r9, rINST, lsr #8 @ r9<- A+ 6264 mov r1, rINST, lsr #12 @ r1<- B 6265 and r9, r9, #15 6266 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6267 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6268 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6269 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6270 .if 0 6271 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6272 beq common_errDivideByZero 6273 .endif 6274 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6275 6276 orr r0, r0, r2 @ optional op; may set condition codes 6277 orr r1, r1, r3 @ result<- op, r0-r3 changed 6278 GET_INST_OPCODE(ip) @ extract opcode from rINST 6279 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6280 GOTO_OPCODE(ip) @ jump to next instruction 6281 /* 12-15 instructions */ 6282 6283 6284 6285/* ------------------------------ */ 6286 .balign 64 6287.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 6288/* File: armv5te/OP_XOR_LONG_2ADDR.S */ 6289/* File: armv5te/binopWide2addr.S */ 6290 /* 6291 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6292 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6293 * This could be an ARM instruction or a function call. (If the result 6294 * comes back in a register other than r0, you can override "result".) 6295 * 6296 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6297 * vCC (r1). Useful for integer division and modulus. 
6298 *
6299 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
6300 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
6301 * sub-double/2addr, mul-double/2addr, div-double/2addr,
6302 * rem-double/2addr
6303 */
6304 /* binop/2addr vA, vB */
6305 mov r9, rINST, lsr #8 @ r9<- A+
6306 mov r1, rINST, lsr #12 @ r1<- B
6307 and r9, r9, #15
6308 add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
6309 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6310 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
6311 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6312 .if 0
6313 orrs ip, r2, r3 @ second arg (r2-r3) is zero?
6314 beq common_errDivideByZero
6315 .endif
6316 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6317
6318 eor r0, r0, r2 @ optional op; may set condition codes
6319 eor r1, r1, r3 @ result<- op, r0-r3 changed
6320 GET_INST_OPCODE(ip) @ extract opcode from rINST
6321 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6322 GOTO_OPCODE(ip) @ jump to next instruction
6323 /* 12-15 instructions */
6324
6325
6326
6327/* ------------------------------ */
6328 .balign 64
6329.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
6330/* File: armv5te/OP_SHL_LONG_2ADDR.S */
6331 /*
6332 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
6333 * 32-bit shift distance.
6334 */
6335 /* shl-long/2addr vA, vB */
6336 mov r9, rINST, lsr #8 @ r9<- A+
6337 mov r3, rINST, lsr #12 @ r3<- B
6338 and r9, r9, #15
6339 GET_VREG(r2, r3) @ r2<- vB
6340 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6341 and r2, r2, #63 @ r2<- r2 & 0x3f
6342 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6343
6344 mov r1, r1, asl r2 @ r1<- r1 << r2
6345 rsb r3, r2, #32 @ r3<- 32 - r2
6346 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
6347 subs ip, r2, #32 @ ip<- r2 - 32
6348 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6349 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
6350 mov r0, r0, asl r2 @ r0<- r0 << r2
6351 b .LOP_SHL_LONG_2ADDR_finish
6352
6353/* ------------------------------ */
6354 .balign 64
6355.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
6356/* File: armv5te/OP_SHR_LONG_2ADDR.S */
6357 /*
6358 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
6359 * 32-bit shift distance.
6360 */
6361 /* shr-long/2addr vA, vB */
6362 mov r9, rINST, lsr #8 @ r9<- A+
6363 mov r3, rINST, lsr #12 @ r3<- B
6364 and r9, r9, #15
6365 GET_VREG(r2, r3) @ r2<- vB
6366 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6367 and r2, r2, #63 @ r2<- r2 & 0x3f
6368 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6369
6370 mov r0, r0, lsr r2 @ r0<- r0 >> r2
6371 rsb r3, r2, #32 @ r3<- 32 - r2
6372 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
6373 subs ip, r2, #32 @ ip<- r2 - 32
6374 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
6375 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
6376 mov r1, r1, asr r2 @ r1<- r1 >> r2
6377 b .LOP_SHR_LONG_2ADDR_finish
6378
6379/* ------------------------------ */
6380 .balign 64
6381.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
6382/* File: armv5te/OP_USHR_LONG_2ADDR.S */
6383 /*
6384 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
6385 * 32-bit shift distance.
     */
    /* ushr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
    b       .LOP_USHR_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
/* File: armv5te/OP_ADD_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r0, r9)                    @ r0<- vA
    GET_VREG(r1, r3)                    @ r1<- vB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fadd                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
/* File: armv5te/OP_SUB_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r0, r9)                    @ r0<- vA
    GET_VREG(r1, r3)                    @ r1<- vB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
6472 beq common_errDivideByZero 6473 .endif 6474 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6475 6476 @ optional op; may set condition codes 6477 bl __aeabi_fsub @ r0<- op, r0-r3 changed 6478 GET_INST_OPCODE(ip) @ extract opcode from rINST 6479 SET_VREG(r0, r9) @ vAA<- r0 6480 GOTO_OPCODE(ip) @ jump to next instruction 6481 /* 10-13 instructions */ 6482 6483 6484 6485/* ------------------------------ */ 6486 .balign 64 6487.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */ 6488/* File: armv5te/OP_MUL_FLOAT_2ADDR.S */ 6489/* File: armv5te/binop2addr.S */ 6490 /* 6491 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6492 * that specifies an instruction that performs "result = r0 op r1". 6493 * This could be an ARM instruction or a function call. (If the result 6494 * comes back in a register other than r0, you can override "result".) 6495 * 6496 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6497 * vCC (r1). Useful for integer division and modulus. 6498 * 6499 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6500 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6501 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6502 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6503 */ 6504 /* binop/2addr vA, vB */ 6505 mov r9, rINST, lsr #8 @ r9<- A+ 6506 mov r3, rINST, lsr #12 @ r3<- B 6507 and r9, r9, #15 6508 GET_VREG(r0, r9) @ r0<- vA 6509 GET_VREG(r1, r3) @ r1<- vB 6510 .if 0 6511 cmp r1, #0 @ is second operand zero? 6512 beq common_errDivideByZero 6513 .endif 6514 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6515 6516 @ optional op; may set condition codes 6517 bl __aeabi_fmul @ r0<- op, r0-r3 changed 6518 GET_INST_OPCODE(ip) @ extract opcode from rINST 6519 SET_VREG(r0, r9) @ vAA<- r0 6520 GOTO_OPCODE(ip) @ jump to next instruction 6521 /* 10-13 instructions */ 6522 6523 6524 6525/* ------------------------------ */ 6526 .balign 64 6527.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */ 6528/* File: armv5te/OP_DIV_FLOAT_2ADDR.S */ 6529/* File: armv5te/binop2addr.S */ 6530 /* 6531 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6532 * that specifies an instruction that performs "result = r0 op r1". 6533 * This could be an ARM instruction or a function call. (If the result 6534 * comes back in a register other than r0, you can override "result".) 6535 * 6536 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6537 * vCC (r1). Useful for integer division and modulus. 6538 * 6539 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6540 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6541 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6542 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6543 */ 6544 /* binop/2addr vA, vB */ 6545 mov r9, rINST, lsr #8 @ r9<- A+ 6546 mov r3, rINST, lsr #12 @ r3<- B 6547 and r9, r9, #15 6548 GET_VREG(r0, r9) @ r0<- vA 6549 GET_VREG(r1, r3) @ r1<- vB 6550 .if 0 6551 cmp r1, #0 @ is second operand zero? 
6552 beq common_errDivideByZero 6553 .endif 6554 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6555 6556 @ optional op; may set condition codes 6557 bl __aeabi_fdiv @ r0<- op, r0-r3 changed 6558 GET_INST_OPCODE(ip) @ extract opcode from rINST 6559 SET_VREG(r0, r9) @ vAA<- r0 6560 GOTO_OPCODE(ip) @ jump to next instruction 6561 /* 10-13 instructions */ 6562 6563 6564 6565/* ------------------------------ */ 6566 .balign 64 6567.L_OP_REM_FLOAT_2ADDR: /* 0xca */ 6568/* File: armv5te/OP_REM_FLOAT_2ADDR.S */ 6569/* EABI doesn't define a float remainder function, but libm does */ 6570/* File: armv5te/binop2addr.S */ 6571 /* 6572 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6573 * that specifies an instruction that performs "result = r0 op r1". 6574 * This could be an ARM instruction or a function call. (If the result 6575 * comes back in a register other than r0, you can override "result".) 6576 * 6577 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6578 * vCC (r1). Useful for integer division and modulus. 6579 * 6580 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6581 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6582 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6583 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6584 */ 6585 /* binop/2addr vA, vB */ 6586 mov r9, rINST, lsr #8 @ r9<- A+ 6587 mov r3, rINST, lsr #12 @ r3<- B 6588 and r9, r9, #15 6589 GET_VREG(r0, r9) @ r0<- vA 6590 GET_VREG(r1, r3) @ r1<- vB 6591 .if 0 6592 cmp r1, #0 @ is second operand zero? 6593 beq common_errDivideByZero 6594 .endif 6595 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6596 6597 @ optional op; may set condition codes 6598 bl fmodf @ r0<- op, r0-r3 changed 6599 GET_INST_OPCODE(ip) @ extract opcode from rINST 6600 SET_VREG(r0, r9) @ vAA<- r0 6601 GOTO_OPCODE(ip) @ jump to next instruction 6602 /* 10-13 instructions */ 6603 6604 6605 6606/* ------------------------------ */ 6607 .balign 64 6608.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */ 6609/* File: armv5te/OP_ADD_DOUBLE_2ADDR.S */ 6610/* File: armv5te/binopWide2addr.S */ 6611 /* 6612 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6613 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6614 * This could be an ARM instruction or a function call. (If the result 6615 * comes back in a register other than r0, you can override "result".) 6616 * 6617 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6618 * vCC (r1). Useful for integer division and modulus. 6619 * 6620 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6621 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6622 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6623 * rem-double/2addr 6624 */ 6625 /* binop/2addr vA, vB */ 6626 mov r9, rINST, lsr #8 @ r9<- A+ 6627 mov r1, rINST, lsr #12 @ r1<- B 6628 and r9, r9, #15 6629 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6630 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6631 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6632 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6633 .if 0 6634 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6635 beq common_errDivideByZero 6636 .endif 6637 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6638 6639 @ optional op; may set condition codes 6640 bl __aeabi_dadd @ result<- op, r0-r3 changed 6641 GET_INST_OPCODE(ip) @ extract opcode from rINST 6642 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6643 GOTO_OPCODE(ip) @ jump to next instruction 6644 /* 12-15 instructions */ 6645 6646 6647 6648/* ------------------------------ */ 6649 .balign 64 6650.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */ 6651/* File: armv5te/OP_SUB_DOUBLE_2ADDR.S */ 6652/* File: armv5te/binopWide2addr.S */ 6653 /* 6654 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6655 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6656 * This could be an ARM instruction or a function call. (If the result 6657 * comes back in a register other than r0, you can override "result".) 6658 * 6659 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6660 * vCC (r1). Useful for integer division and modulus. 6661 * 6662 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6663 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6664 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6665 * rem-double/2addr 6666 */ 6667 /* binop/2addr vA, vB */ 6668 mov r9, rINST, lsr #8 @ r9<- A+ 6669 mov r1, rINST, lsr #12 @ r1<- B 6670 and r9, r9, #15 6671 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6672 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6673 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6674 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6675 .if 0 6676 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6677 beq common_errDivideByZero 6678 .endif 6679 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6680 6681 @ optional op; may set condition codes 6682 bl __aeabi_dsub @ result<- op, r0-r3 changed 6683 GET_INST_OPCODE(ip) @ extract opcode from rINST 6684 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6685 GOTO_OPCODE(ip) @ jump to next instruction 6686 /* 12-15 instructions */ 6687 6688 6689 6690/* ------------------------------ */ 6691 .balign 64 6692.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */ 6693/* File: armv5te/OP_MUL_DOUBLE_2ADDR.S */ 6694/* File: armv5te/binopWide2addr.S */ 6695 /* 6696 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6697 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6698 * This could be an ARM instruction or a function call. (If the result 6699 * comes back in a register other than r0, you can override "result".) 6700 * 6701 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6702 * vCC (r1). Useful for integer division and modulus. 6703 * 6704 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6705 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6706 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6707 * rem-double/2addr 6708 */ 6709 /* binop/2addr vA, vB */ 6710 mov r9, rINST, lsr #8 @ r9<- A+ 6711 mov r1, rINST, lsr #12 @ r1<- B 6712 and r9, r9, #15 6713 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6714 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6715 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6716 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6717 .if 0 6718 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6719 beq common_errDivideByZero 6720 .endif 6721 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6722 6723 @ optional op; may set condition codes 6724 bl __aeabi_dmul @ result<- op, r0-r3 changed 6725 GET_INST_OPCODE(ip) @ extract opcode from rINST 6726 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6727 GOTO_OPCODE(ip) @ jump to next instruction 6728 /* 12-15 instructions */ 6729 6730 6731 6732/* ------------------------------ */ 6733 .balign 64 6734.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */ 6735/* File: armv5te/OP_DIV_DOUBLE_2ADDR.S */ 6736/* File: armv5te/binopWide2addr.S */ 6737 /* 6738 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6739 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6740 * This could be an ARM instruction or a function call. (If the result 6741 * comes back in a register other than r0, you can override "result".) 6742 * 6743 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6744 * vCC (r1). Useful for integer division and modulus. 6745 * 6746 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6747 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6748 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6749 * rem-double/2addr 6750 */ 6751 /* binop/2addr vA, vB */ 6752 mov r9, rINST, lsr #8 @ r9<- A+ 6753 mov r1, rINST, lsr #12 @ r1<- B 6754 and r9, r9, #15 6755 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6756 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6757 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6758 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6759 .if 0 6760 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6761 beq common_errDivideByZero 6762 .endif 6763 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6764 6765 @ optional op; may set condition codes 6766 bl __aeabi_ddiv @ result<- op, r0-r3 changed 6767 GET_INST_OPCODE(ip) @ extract opcode from rINST 6768 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6769 GOTO_OPCODE(ip) @ jump to next instruction 6770 /* 12-15 instructions */ 6771 6772 6773 6774/* ------------------------------ */ 6775 .balign 64 6776.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */ 6777/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */ 6778/* EABI doesn't define a double remainder function, but libm does */ 6779/* File: armv5te/binopWide2addr.S */ 6780 /* 6781 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6782 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6783 * This could be an ARM instruction or a function call. (If the result 6784 * comes back in a register other than r0, you can override "result".) 6785 * 6786 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6787 * vCC (r1). Useful for integer division and modulus. 6788 * 6789 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6790 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6791 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6792 * rem-double/2addr 6793 */ 6794 /* binop/2addr vA, vB */ 6795 mov r9, rINST, lsr #8 @ r9<- A+ 6796 mov r1, rINST, lsr #12 @ r1<- B 6797 and r9, r9, #15 6798 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6799 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6800 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6801 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6802 .if 0 6803 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6804 beq common_errDivideByZero 6805 .endif 6806 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6807 6808 @ optional op; may set condition codes 6809 bl fmod @ result<- op, r0-r3 changed 6810 GET_INST_OPCODE(ip) @ extract opcode from rINST 6811 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6812 GOTO_OPCODE(ip) @ jump to next instruction 6813 /* 12-15 instructions */ 6814 6815 6816 6817/* ------------------------------ */ 6818 .balign 64 6819.L_OP_ADD_INT_LIT16: /* 0xd0 */ 6820/* File: armv5te/OP_ADD_INT_LIT16.S */ 6821/* File: armv5te/binopLit16.S */ 6822 /* 6823 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6824 * that specifies an instruction that performs "result = r0 op r1". 6825 * This could be an ARM instruction or a function call. (If the result 6826 * comes back in a register other than r0, you can override "result".) 6827 * 6828 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6829 * vCC (r1). Useful for integer division and modulus. 6830 * 6831 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6832 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6833 */ 6834 /* binop/lit16 vA, vB, #+CCCC */ 6835 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6836 mov r2, rINST, lsr #12 @ r2<- B 6837 mov r9, rINST, lsr #8 @ r9<- A+ 6838 GET_VREG(r0, r2) @ r0<- vB 6839 and r9, r9, #15 6840 .if 0 6841 cmp r1, #0 @ is second operand zero? 6842 beq common_errDivideByZero 6843 .endif 6844 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6845 6846 add r0, r0, r1 @ r0<- op, r0-r3 changed 6847 GET_INST_OPCODE(ip) @ extract opcode from rINST 6848 SET_VREG(r0, r9) @ vAA<- r0 6849 GOTO_OPCODE(ip) @ jump to next instruction 6850 /* 10-13 instructions */ 6851 6852 6853 6854/* ------------------------------ */ 6855 .balign 64 6856.L_OP_RSUB_INT: /* 0xd1 */ 6857/* File: armv5te/OP_RSUB_INT.S */ 6858/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */ 6859/* File: armv5te/binopLit16.S */ 6860 /* 6861 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6862 * that specifies an instruction that performs "result = r0 op r1". 6863 * This could be an ARM instruction or a function call. (If the result 6864 * comes back in a register other than r0, you can override "result".) 6865 * 6866 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6867 * vCC (r1). Useful for integer division and modulus. 6868 * 6869 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6870 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6871 */ 6872 /* binop/lit16 vA, vB, #+CCCC */ 6873 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6874 mov r2, rINST, lsr #12 @ r2<- B 6875 mov r9, rINST, lsr #8 @ r9<- A+ 6876 GET_VREG(r0, r2) @ r0<- vB 6877 and r9, r9, #15 6878 .if 0 6879 cmp r1, #0 @ is second operand zero? 6880 beq common_errDivideByZero 6881 .endif 6882 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6883 6884 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6885 GET_INST_OPCODE(ip) @ extract opcode from rINST 6886 SET_VREG(r0, r9) @ vAA<- r0 6887 GOTO_OPCODE(ip) @ jump to next instruction 6888 /* 10-13 instructions */ 6889 6890 6891 6892/* ------------------------------ */ 6893 .balign 64 6894.L_OP_MUL_INT_LIT16: /* 0xd2 */ 6895/* File: armv5te/OP_MUL_INT_LIT16.S */ 6896/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6897/* File: armv5te/binopLit16.S */ 6898 /* 6899 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6900 * that specifies an instruction that performs "result = r0 op r1". 
6901 * This could be an ARM instruction or a function call. (If the result 6902 * comes back in a register other than r0, you can override "result".) 6903 * 6904 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6905 * vCC (r1). Useful for integer division and modulus. 6906 * 6907 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6908 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6909 */ 6910 /* binop/lit16 vA, vB, #+CCCC */ 6911 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6912 mov r2, rINST, lsr #12 @ r2<- B 6913 mov r9, rINST, lsr #8 @ r9<- A+ 6914 GET_VREG(r0, r2) @ r0<- vB 6915 and r9, r9, #15 6916 .if 0 6917 cmp r1, #0 @ is second operand zero? 6918 beq common_errDivideByZero 6919 .endif 6920 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6921 6922 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6923 GET_INST_OPCODE(ip) @ extract opcode from rINST 6924 SET_VREG(r0, r9) @ vAA<- r0 6925 GOTO_OPCODE(ip) @ jump to next instruction 6926 /* 10-13 instructions */ 6927 6928 6929 6930/* ------------------------------ */ 6931 .balign 64 6932.L_OP_DIV_INT_LIT16: /* 0xd3 */ 6933/* File: armv5te/OP_DIV_INT_LIT16.S */ 6934/* File: armv5te/binopLit16.S */ 6935 /* 6936 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6937 * that specifies an instruction that performs "result = r0 op r1". 6938 * This could be an ARM instruction or a function call. (If the result 6939 * comes back in a register other than r0, you can override "result".) 6940 * 6941 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6942 * vCC (r1). Useful for integer division and modulus. 6943 * 6944 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6945 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6946 */ 6947 /* binop/lit16 vA, vB, #+CCCC */ 6948 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6949 mov r2, rINST, lsr #12 @ r2<- B 6950 mov r9, rINST, lsr #8 @ r9<- A+ 6951 GET_VREG(r0, r2) @ r0<- vB 6952 and r9, r9, #15 6953 .if 1 6954 cmp r1, #0 @ is second operand zero? 6955 beq common_errDivideByZero 6956 .endif 6957 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6958 6959 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6960 GET_INST_OPCODE(ip) @ extract opcode from rINST 6961 SET_VREG(r0, r9) @ vAA<- r0 6962 GOTO_OPCODE(ip) @ jump to next instruction 6963 /* 10-13 instructions */ 6964 6965 6966 6967/* ------------------------------ */ 6968 .balign 64 6969.L_OP_REM_INT_LIT16: /* 0xd4 */ 6970/* File: armv5te/OP_REM_INT_LIT16.S */ 6971/* idivmod returns quotient in r0 and remainder in r1 */ 6972/* File: armv5te/binopLit16.S */ 6973 /* 6974 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6975 * that specifies an instruction that performs "result = r0 op r1". 6976 * This could be an ARM instruction or a function call. (If the result 6977 * comes back in a register other than r0, you can override "result".) 6978 * 6979 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6980 * vCC (r1). Useful for integer division and modulus. 6981 * 6982 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6983 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6984 */ 6985 /* binop/lit16 vA, vB, #+CCCC */ 6986 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6987 mov r2, rINST, lsr #12 @ r2<- B 6988 mov r9, rINST, lsr #8 @ r9<- A+ 6989 GET_VREG(r0, r2) @ r0<- vB 6990 and r9, r9, #15 6991 .if 1 6992 cmp r1, #0 @ is second operand zero? 
6993 beq common_errDivideByZero 6994 .endif 6995 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6996 6997 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6998 GET_INST_OPCODE(ip) @ extract opcode from rINST 6999 SET_VREG(r1, r9) @ vAA<- r1 7000 GOTO_OPCODE(ip) @ jump to next instruction 7001 /* 10-13 instructions */ 7002 7003 7004 7005/* ------------------------------ */ 7006 .balign 64 7007.L_OP_AND_INT_LIT16: /* 0xd5 */ 7008/* File: armv5te/OP_AND_INT_LIT16.S */ 7009/* File: armv5te/binopLit16.S */ 7010 /* 7011 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 7012 * that specifies an instruction that performs "result = r0 op r1". 7013 * This could be an ARM instruction or a function call. (If the result 7014 * comes back in a register other than r0, you can override "result".) 7015 * 7016 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7017 * vCC (r1). Useful for integer division and modulus. 7018 * 7019 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 7020 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 7021 */ 7022 /* binop/lit16 vA, vB, #+CCCC */ 7023 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 7024 mov r2, rINST, lsr #12 @ r2<- B 7025 mov r9, rINST, lsr #8 @ r9<- A+ 7026 GET_VREG(r0, r2) @ r0<- vB 7027 and r9, r9, #15 7028 .if 0 7029 cmp r1, #0 @ is second operand zero? 7030 beq common_errDivideByZero 7031 .endif 7032 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7033 7034 and r0, r0, r1 @ r0<- op, r0-r3 changed 7035 GET_INST_OPCODE(ip) @ extract opcode from rINST 7036 SET_VREG(r0, r9) @ vAA<- r0 7037 GOTO_OPCODE(ip) @ jump to next instruction 7038 /* 10-13 instructions */ 7039 7040 7041 7042/* ------------------------------ */ 7043 .balign 64 7044.L_OP_OR_INT_LIT16: /* 0xd6 */ 7045/* File: armv5te/OP_OR_INT_LIT16.S */ 7046/* File: armv5te/binopLit16.S */ 7047 /* 7048 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 7049 * that specifies an instruction that performs "result = r0 op r1". 7050 * This could be an ARM instruction or a function call. (If the result 7051 * comes back in a register other than r0, you can override "result".) 7052 * 7053 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7054 * vCC (r1). Useful for integer division and modulus. 7055 * 7056 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 7057 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 7058 */ 7059 /* binop/lit16 vA, vB, #+CCCC */ 7060 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 7061 mov r2, rINST, lsr #12 @ r2<- B 7062 mov r9, rINST, lsr #8 @ r9<- A+ 7063 GET_VREG(r0, r2) @ r0<- vB 7064 and r9, r9, #15 7065 .if 0 7066 cmp r1, #0 @ is second operand zero? 7067 beq common_errDivideByZero 7068 .endif 7069 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7070 7071 orr r0, r0, r1 @ r0<- op, r0-r3 changed 7072 GET_INST_OPCODE(ip) @ extract opcode from rINST 7073 SET_VREG(r0, r9) @ vAA<- r0 7074 GOTO_OPCODE(ip) @ jump to next instruction 7075 /* 10-13 instructions */ 7076 7077 7078 7079/* ------------------------------ */ 7080 .balign 64 7081.L_OP_XOR_INT_LIT16: /* 0xd7 */ 7082/* File: armv5te/OP_XOR_INT_LIT16.S */ 7083/* File: armv5te/binopLit16.S */ 7084 /* 7085 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 7086 * that specifies an instruction that performs "result = r0 op r1". 7087 * This could be an ARM instruction or a function call. (If the result 7088 * comes back in a register other than r0, you can override "result".) 
7089 * 7090 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7091 * vCC (r1). Useful for integer division and modulus. 7092 * 7093 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 7094 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 7095 */ 7096 /* binop/lit16 vA, vB, #+CCCC */ 7097 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 7098 mov r2, rINST, lsr #12 @ r2<- B 7099 mov r9, rINST, lsr #8 @ r9<- A+ 7100 GET_VREG(r0, r2) @ r0<- vB 7101 and r9, r9, #15 7102 .if 0 7103 cmp r1, #0 @ is second operand zero? 7104 beq common_errDivideByZero 7105 .endif 7106 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7107 7108 eor r0, r0, r1 @ r0<- op, r0-r3 changed 7109 GET_INST_OPCODE(ip) @ extract opcode from rINST 7110 SET_VREG(r0, r9) @ vAA<- r0 7111 GOTO_OPCODE(ip) @ jump to next instruction 7112 /* 10-13 instructions */ 7113 7114 7115 7116/* ------------------------------ */ 7117 .balign 64 7118.L_OP_ADD_INT_LIT8: /* 0xd8 */ 7119/* File: armv5te/OP_ADD_INT_LIT8.S */ 7120/* File: armv5te/binopLit8.S */ 7121 /* 7122 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7123 * that specifies an instruction that performs "result = r0 op r1". 7124 * This could be an ARM instruction or a function call. (If the result 7125 * comes back in a register other than r0, you can override "result".) 7126 * 7127 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7128 * vCC (r1). Useful for integer division and modulus. 7129 * 7130 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7131 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7132 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7133 */ 7134 /* binop/lit8 vAA, vBB, #+CC */ 7135 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7136 mov r9, rINST, lsr #8 @ r9<- AA 7137 and r2, r3, #255 @ r2<- BB 7138 GET_VREG(r0, r2) @ r0<- vBB 7139 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7140 .if 0 7141 @cmp r1, #0 @ is second operand zero? 7142 beq common_errDivideByZero 7143 .endif 7144 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7145 7146 @ optional op; may set condition codes 7147 add r0, r0, r1 @ r0<- op, r0-r3 changed 7148 GET_INST_OPCODE(ip) @ extract opcode from rINST 7149 SET_VREG(r0, r9) @ vAA<- r0 7150 GOTO_OPCODE(ip) @ jump to next instruction 7151 /* 10-12 instructions */ 7152 7153 7154 7155/* ------------------------------ */ 7156 .balign 64 7157.L_OP_RSUB_INT_LIT8: /* 0xd9 */ 7158/* File: armv5te/OP_RSUB_INT_LIT8.S */ 7159/* File: armv5te/binopLit8.S */ 7160 /* 7161 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7162 * that specifies an instruction that performs "result = r0 op r1". 7163 * This could be an ARM instruction or a function call. (If the result 7164 * comes back in a register other than r0, you can override "result".) 7165 * 7166 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7167 * vCC (r1). Useful for integer division and modulus. 7168 * 7169 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7170 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7171 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7172 */ 7173 /* binop/lit8 vAA, vBB, #+CC */ 7174 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7175 mov r9, rINST, lsr #8 @ r9<- AA 7176 and r2, r3, #255 @ r2<- BB 7177 GET_VREG(r0, r2) @ r0<- vBB 7178 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7179 .if 0 7180 @cmp r1, #0 @ is second operand zero? 
7181 beq common_errDivideByZero 7182 .endif 7183 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7184 7185 @ optional op; may set condition codes 7186 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 7187 GET_INST_OPCODE(ip) @ extract opcode from rINST 7188 SET_VREG(r0, r9) @ vAA<- r0 7189 GOTO_OPCODE(ip) @ jump to next instruction 7190 /* 10-12 instructions */ 7191 7192 7193 7194/* ------------------------------ */ 7195 .balign 64 7196.L_OP_MUL_INT_LIT8: /* 0xda */ 7197/* File: armv5te/OP_MUL_INT_LIT8.S */ 7198/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 7199/* File: armv5te/binopLit8.S */ 7200 /* 7201 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7202 * that specifies an instruction that performs "result = r0 op r1". 7203 * This could be an ARM instruction or a function call. (If the result 7204 * comes back in a register other than r0, you can override "result".) 7205 * 7206 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7207 * vCC (r1). Useful for integer division and modulus. 7208 * 7209 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7210 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7211 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7212 */ 7213 /* binop/lit8 vAA, vBB, #+CC */ 7214 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7215 mov r9, rINST, lsr #8 @ r9<- AA 7216 and r2, r3, #255 @ r2<- BB 7217 GET_VREG(r0, r2) @ r0<- vBB 7218 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7219 .if 0 7220 @cmp r1, #0 @ is second operand zero? 7221 beq common_errDivideByZero 7222 .endif 7223 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7224 7225 @ optional op; may set condition codes 7226 mul r0, r1, r0 @ r0<- op, r0-r3 changed 7227 GET_INST_OPCODE(ip) @ extract opcode from rINST 7228 SET_VREG(r0, r9) @ vAA<- r0 7229 GOTO_OPCODE(ip) @ jump to next instruction 7230 /* 10-12 instructions */ 7231 7232 7233 7234/* ------------------------------ */ 7235 .balign 64 7236.L_OP_DIV_INT_LIT8: /* 0xdb */ 7237/* File: armv5te/OP_DIV_INT_LIT8.S */ 7238/* File: armv5te/binopLit8.S */ 7239 /* 7240 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7241 * that specifies an instruction that performs "result = r0 op r1". 7242 * This could be an ARM instruction or a function call. (If the result 7243 * comes back in a register other than r0, you can override "result".) 7244 * 7245 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7246 * vCC (r1). Useful for integer division and modulus. 7247 * 7248 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7249 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7250 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7251 */ 7252 /* binop/lit8 vAA, vBB, #+CC */ 7253 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7254 mov r9, rINST, lsr #8 @ r9<- AA 7255 and r2, r3, #255 @ r2<- BB 7256 GET_VREG(r0, r2) @ r0<- vBB 7257 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7258 .if 1 7259 @cmp r1, #0 @ is second operand zero? 
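                                        @ note: the "movs r1" above already set Z from the literal (ssssssCC),
                                        @ so the explicit cmp stays commented out here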
7260 beq common_errDivideByZero 7261 .endif 7262 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7263 7264 @ optional op; may set condition codes 7265 bl __aeabi_idiv @ r0<- op, r0-r3 changed 7266 GET_INST_OPCODE(ip) @ extract opcode from rINST 7267 SET_VREG(r0, r9) @ vAA<- r0 7268 GOTO_OPCODE(ip) @ jump to next instruction 7269 /* 10-12 instructions */ 7270 7271 7272 7273/* ------------------------------ */ 7274 .balign 64 7275.L_OP_REM_INT_LIT8: /* 0xdc */ 7276/* File: armv5te/OP_REM_INT_LIT8.S */ 7277/* idivmod returns quotient in r0 and remainder in r1 */ 7278/* File: armv5te/binopLit8.S */ 7279 /* 7280 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7281 * that specifies an instruction that performs "result = r0 op r1". 7282 * This could be an ARM instruction or a function call. (If the result 7283 * comes back in a register other than r0, you can override "result".) 7284 * 7285 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7286 * vCC (r1). Useful for integer division and modulus. 7287 * 7288 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7289 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7290 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7291 */ 7292 /* binop/lit8 vAA, vBB, #+CC */ 7293 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7294 mov r9, rINST, lsr #8 @ r9<- AA 7295 and r2, r3, #255 @ r2<- BB 7296 GET_VREG(r0, r2) @ r0<- vBB 7297 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7298 .if 1 7299 @cmp r1, #0 @ is second operand zero? 7300 beq common_errDivideByZero 7301 .endif 7302 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7303 7304 @ optional op; may set condition codes 7305 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 7306 GET_INST_OPCODE(ip) @ extract opcode from rINST 7307 SET_VREG(r1, r9) @ vAA<- r1 7308 GOTO_OPCODE(ip) @ jump to next instruction 7309 /* 10-12 instructions */ 7310 7311 7312 7313/* ------------------------------ */ 7314 .balign 64 7315.L_OP_AND_INT_LIT8: /* 0xdd */ 7316/* File: armv5te/OP_AND_INT_LIT8.S */ 7317/* File: armv5te/binopLit8.S */ 7318 /* 7319 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7320 * that specifies an instruction that performs "result = r0 op r1". 7321 * This could be an ARM instruction or a function call. (If the result 7322 * comes back in a register other than r0, you can override "result".) 7323 * 7324 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7325 * vCC (r1). Useful for integer division and modulus. 7326 * 7327 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7328 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7329 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7330 */ 7331 /* binop/lit8 vAA, vBB, #+CC */ 7332 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7333 mov r9, rINST, lsr #8 @ r9<- AA 7334 and r2, r3, #255 @ r2<- BB 7335 GET_VREG(r0, r2) @ r0<- vBB 7336 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7337 .if 0 7338 @cmp r1, #0 @ is second operand zero? 
7339 beq common_errDivideByZero 7340 .endif 7341 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7342 7343 @ optional op; may set condition codes 7344 and r0, r0, r1 @ r0<- op, r0-r3 changed 7345 GET_INST_OPCODE(ip) @ extract opcode from rINST 7346 SET_VREG(r0, r9) @ vAA<- r0 7347 GOTO_OPCODE(ip) @ jump to next instruction 7348 /* 10-12 instructions */ 7349 7350 7351 7352/* ------------------------------ */ 7353 .balign 64 7354.L_OP_OR_INT_LIT8: /* 0xde */ 7355/* File: armv5te/OP_OR_INT_LIT8.S */ 7356/* File: armv5te/binopLit8.S */ 7357 /* 7358 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7359 * that specifies an instruction that performs "result = r0 op r1". 7360 * This could be an ARM instruction or a function call. (If the result 7361 * comes back in a register other than r0, you can override "result".) 7362 * 7363 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7364 * vCC (r1). Useful for integer division and modulus. 7365 * 7366 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7367 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7368 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7369 */ 7370 /* binop/lit8 vAA, vBB, #+CC */ 7371 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7372 mov r9, rINST, lsr #8 @ r9<- AA 7373 and r2, r3, #255 @ r2<- BB 7374 GET_VREG(r0, r2) @ r0<- vBB 7375 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7376 .if 0 7377 @cmp r1, #0 @ is second operand zero? 7378 beq common_errDivideByZero 7379 .endif 7380 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7381 7382 @ optional op; may set condition codes 7383 orr r0, r0, r1 @ r0<- op, r0-r3 changed 7384 GET_INST_OPCODE(ip) @ extract opcode from rINST 7385 SET_VREG(r0, r9) @ vAA<- r0 7386 GOTO_OPCODE(ip) @ jump to next instruction 7387 /* 10-12 instructions */ 7388 7389 7390 7391/* ------------------------------ */ 7392 .balign 64 7393.L_OP_XOR_INT_LIT8: /* 0xdf */ 7394/* File: armv5te/OP_XOR_INT_LIT8.S */ 7395/* File: armv5te/binopLit8.S */ 7396 /* 7397 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7398 * that specifies an instruction that performs "result = r0 op r1". 7399 * This could be an ARM instruction or a function call. (If the result 7400 * comes back in a register other than r0, you can override "result".) 7401 * 7402 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7403 * vCC (r1). Useful for integer division and modulus. 7404 * 7405 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7406 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7407 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7408 */ 7409 /* binop/lit8 vAA, vBB, #+CC */ 7410 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7411 mov r9, rINST, lsr #8 @ r9<- AA 7412 and r2, r3, #255 @ r2<- BB 7413 GET_VREG(r0, r2) @ r0<- vBB 7414 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7415 .if 0 7416 @cmp r1, #0 @ is second operand zero? 7417 beq common_errDivideByZero 7418 .endif 7419 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7420 7421 @ optional op; may set condition codes 7422 eor r0, r0, r1 @ r0<- op, r0-r3 changed 7423 GET_INST_OPCODE(ip) @ extract opcode from rINST 7424 SET_VREG(r0, r9) @ vAA<- r0 7425 GOTO_OPCODE(ip) @ jump to next instruction 7426 /* 10-12 instructions */ 7427 7428 7429 7430/* ------------------------------ */ 7431 .balign 64 7432.L_OP_SHL_INT_LIT8: /* 0xe0 */ 7433/* File: armv5te/OP_SHL_INT_LIT8.S */ 7434/* File: armv5te/binopLit8.S */ 7435 /* 7436 * Generic 32-bit "lit8" binary operation. 
Provide an "instr" line 7437 * that specifies an instruction that performs "result = r0 op r1". 7438 * This could be an ARM instruction or a function call. (If the result 7439 * comes back in a register other than r0, you can override "result".) 7440 * 7441 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7442 * vCC (r1). Useful for integer division and modulus. 7443 * 7444 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7445 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7446 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7447 */ 7448 /* binop/lit8 vAA, vBB, #+CC */ 7449 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7450 mov r9, rINST, lsr #8 @ r9<- AA 7451 and r2, r3, #255 @ r2<- BB 7452 GET_VREG(r0, r2) @ r0<- vBB 7453 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7454 .if 0 7455 @cmp r1, #0 @ is second operand zero? 7456 beq common_errDivideByZero 7457 .endif 7458 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7459 7460 and r1, r1, #31 @ optional op; may set condition codes 7461 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 7462 GET_INST_OPCODE(ip) @ extract opcode from rINST 7463 SET_VREG(r0, r9) @ vAA<- r0 7464 GOTO_OPCODE(ip) @ jump to next instruction 7465 /* 10-12 instructions */ 7466 7467 7468 7469/* ------------------------------ */ 7470 .balign 64 7471.L_OP_SHR_INT_LIT8: /* 0xe1 */ 7472/* File: armv5te/OP_SHR_INT_LIT8.S */ 7473/* File: armv5te/binopLit8.S */ 7474 /* 7475 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7476 * that specifies an instruction that performs "result = r0 op r1". 7477 * This could be an ARM instruction or a function call. (If the result 7478 * comes back in a register other than r0, you can override "result".) 7479 * 7480 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7481 * vCC (r1). Useful for integer division and modulus. 7482 * 7483 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7484 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7485 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7486 */ 7487 /* binop/lit8 vAA, vBB, #+CC */ 7488 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7489 mov r9, rINST, lsr #8 @ r9<- AA 7490 and r2, r3, #255 @ r2<- BB 7491 GET_VREG(r0, r2) @ r0<- vBB 7492 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7493 .if 0 7494 @cmp r1, #0 @ is second operand zero? 7495 beq common_errDivideByZero 7496 .endif 7497 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7498 7499 and r1, r1, #31 @ optional op; may set condition codes 7500 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 7501 GET_INST_OPCODE(ip) @ extract opcode from rINST 7502 SET_VREG(r0, r9) @ vAA<- r0 7503 GOTO_OPCODE(ip) @ jump to next instruction 7504 /* 10-12 instructions */ 7505 7506 7507 7508/* ------------------------------ */ 7509 .balign 64 7510.L_OP_USHR_INT_LIT8: /* 0xe2 */ 7511/* File: armv5te/OP_USHR_INT_LIT8.S */ 7512/* File: armv5te/binopLit8.S */ 7513 /* 7514 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7515 * that specifies an instruction that performs "result = r0 op r1". 7516 * This could be an ARM instruction or a function call. (If the result 7517 * comes back in a register other than r0, you can override "result".) 7518 * 7519 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7520 * vCC (r1). Useful for integer division and modulus. 
7521 * 7522 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7523 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7524 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7525 */ 7526 /* binop/lit8 vAA, vBB, #+CC */ 7527 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7528 mov r9, rINST, lsr #8 @ r9<- AA 7529 and r2, r3, #255 @ r2<- BB 7530 GET_VREG(r0, r2) @ r0<- vBB 7531 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7532 .if 0 7533 @cmp r1, #0 @ is second operand zero? 7534 beq common_errDivideByZero 7535 .endif 7536 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7537 7538 and r1, r1, #31 @ optional op; may set condition codes 7539 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 7540 GET_INST_OPCODE(ip) @ extract opcode from rINST 7541 SET_VREG(r0, r9) @ vAA<- r0 7542 GOTO_OPCODE(ip) @ jump to next instruction 7543 /* 10-12 instructions */ 7544 7545 7546 7547/* ------------------------------ */ 7548 .balign 64 7549.L_OP_UNUSED_E3: /* 0xe3 */ 7550/* File: armv5te/OP_UNUSED_E3.S */ 7551/* File: armv5te/unused.S */ 7552 bl common_abort 7553 7554 7555 7556/* ------------------------------ */ 7557 .balign 64 7558.L_OP_UNUSED_E4: /* 0xe4 */ 7559/* File: armv5te/OP_UNUSED_E4.S */ 7560/* File: armv5te/unused.S */ 7561 bl common_abort 7562 7563 7564 7565/* ------------------------------ */ 7566 .balign 64 7567.L_OP_UNUSED_E5: /* 0xe5 */ 7568/* File: armv5te/OP_UNUSED_E5.S */ 7569/* File: armv5te/unused.S */ 7570 bl common_abort 7571 7572 7573 7574/* ------------------------------ */ 7575 .balign 64 7576.L_OP_UNUSED_E6: /* 0xe6 */ 7577/* File: armv5te/OP_UNUSED_E6.S */ 7578/* File: armv5te/unused.S */ 7579 bl common_abort 7580 7581 7582 7583/* ------------------------------ */ 7584 .balign 64 7585.L_OP_UNUSED_E7: /* 0xe7 */ 7586/* File: armv5te/OP_UNUSED_E7.S */ 7587/* File: armv5te/unused.S */ 7588 bl common_abort 7589 7590 7591 7592/* ------------------------------ */ 7593 .balign 64 7594.L_OP_UNUSED_E8: /* 0xe8 */ 7595/* File: armv5te/OP_UNUSED_E8.S */ 7596/* File: armv5te/unused.S */ 7597 bl common_abort 7598 7599 7600 7601/* ------------------------------ */ 7602 .balign 64 7603.L_OP_UNUSED_E9: /* 0xe9 */ 7604/* File: armv5te/OP_UNUSED_E9.S */ 7605/* File: armv5te/unused.S */ 7606 bl common_abort 7607 7608 7609 7610/* ------------------------------ */ 7611 .balign 64 7612.L_OP_UNUSED_EA: /* 0xea */ 7613/* File: armv5te/OP_UNUSED_EA.S */ 7614/* File: armv5te/unused.S */ 7615 bl common_abort 7616 7617 7618 7619/* ------------------------------ */ 7620 .balign 64 7621.L_OP_UNUSED_EB: /* 0xeb */ 7622/* File: armv5te/OP_UNUSED_EB.S */ 7623/* File: armv5te/unused.S */ 7624 bl common_abort 7625 7626 7627 7628/* ------------------------------ */ 7629 .balign 64 7630.L_OP_UNUSED_EC: /* 0xec */ 7631/* File: armv5te/OP_UNUSED_EC.S */ 7632/* File: armv5te/unused.S */ 7633 bl common_abort 7634 7635 7636 7637/* ------------------------------ */ 7638 .balign 64 7639.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */ 7640/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */ 7641 /* 7642 * Handle a throw-verification-error instruction. This throws an 7643 * exception for an error discovered during verification. The 7644 * exception is indicated by AA, with some detail provided by BBBB. 
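 *
 * In rough C terms (a sketch only; the exact prototype of the helper lives
 * in the C sources and the argument names here are illustrative), the
 * handler below does:
 *
 *   EXPORT_PC();                                  // about to throw
 *   dvmThrowVerificationError(glue->method,       // r0
 *                             AA,                 // r1: error kind
 *                             BBBB);              // r2: detail ref
 *   goto common_exceptionThrown;                  // the call always throws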
7645 */ 7646 /* op AA, ref@BBBB */ 7647 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7648 FETCH(r2, 1) @ r2<- BBBB 7649 EXPORT_PC() @ export the PC 7650 mov r1, rINST, lsr #8 @ r1<- AA 7651 bl dvmThrowVerificationError @ always throws 7652 b common_exceptionThrown @ handle exception 7653 7654 7655/* ------------------------------ */ 7656 .balign 64 7657.L_OP_EXECUTE_INLINE: /* 0xee */ 7658/* File: armv5te/OP_EXECUTE_INLINE.S */ 7659 /* 7660 * Execute a "native inline" instruction. 7661 * 7662 * We need to call: 7663 * dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref) 7664 * 7665 * The first four args are in r0-r3, but the last two must be pushed 7666 * onto the stack. 7667 */ 7668 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */ 7669 FETCH(r10, 1) @ r10<- BBBB 7670 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7671 EXPORT_PC() @ can throw 7672 sub sp, sp, #8 @ make room for arg(s) 7673 mov r0, rINST, lsr #12 @ r0<- B 7674 str r1, [sp] @ push &glue->retval 7675 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after 7676 add sp, sp, #8 @ pop stack 7677 cmp r0, #0 @ test boolean result of inline 7678 beq common_exceptionThrown @ returned false, handle exception 7679 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7680 GET_INST_OPCODE(ip) @ extract opcode from rINST 7681 GOTO_OPCODE(ip) @ jump to next instruction 7682 7683/* ------------------------------ */ 7684 .balign 64 7685.L_OP_UNUSED_EF: /* 0xef */ 7686/* File: armv5te/OP_UNUSED_EF.S */ 7687/* File: armv5te/unused.S */ 7688 bl common_abort 7689 7690 7691 7692/* ------------------------------ */ 7693 .balign 64 7694.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7695/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7696 /* 7697 * invoke-direct-empty is a no-op in a "standard" interpreter. 
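 * (The handler below simply advances rPC past the three code units of the
 * invoke and dispatches the next instruction.)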
7698 */ 7699 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7700 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7701 GOTO_OPCODE(ip) @ execute it 7702 7703/* ------------------------------ */ 7704 .balign 64 7705.L_OP_UNUSED_F1: /* 0xf1 */ 7706/* File: armv5te/OP_UNUSED_F1.S */ 7707/* File: armv5te/unused.S */ 7708 bl common_abort 7709 7710 7711 7712/* ------------------------------ */ 7713 .balign 64 7714.L_OP_IGET_QUICK: /* 0xf2 */ 7715/* File: armv5te/OP_IGET_QUICK.S */ 7716 /* For: iget-quick, iget-object-quick */ 7717 /* op vA, vB, offset@CCCC */ 7718 mov r2, rINST, lsr #12 @ r2<- B 7719 GET_VREG(r3, r2) @ r3<- object we're operating on 7720 FETCH(r1, 1) @ r1<- field byte offset 7721 cmp r3, #0 @ check object for null 7722 mov r2, rINST, lsr #8 @ r2<- A(+) 7723 beq common_errNullObject @ object was null 7724 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7725 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7726 and r2, r2, #15 7727 GET_INST_OPCODE(ip) @ extract opcode from rINST 7728 SET_VREG(r0, r2) @ fp[A]<- r0 7729 GOTO_OPCODE(ip) @ jump to next instruction 7730 7731 7732/* ------------------------------ */ 7733 .balign 64 7734.L_OP_IGET_WIDE_QUICK: /* 0xf3 */ 7735/* File: armv4t/OP_IGET_WIDE_QUICK.S */ 7736 /* iget-wide-quick vA, vB, offset@CCCC */ 7737 mov r2, rINST, lsr #12 @ r2<- B 7738 GET_VREG(r3, r2) @ r3<- object we're operating on 7739 FETCH(r1, 1) @ r1<- field byte offset 7740 cmp r3, #0 @ check object for null 7741 mov r2, rINST, lsr #8 @ r2<- A(+) 7742 beq common_errNullObject @ object was null 7743 add r9, r3, r1 @ r9<- object + offset 7744 ldmia r9, {r0-r1} @ r0/r1<- obj.field (64 bits, aligned) 7745 and r2, r2, #15 @ r2<- A 7746 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7747 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 7748 GET_INST_OPCODE(ip) @ extract opcode from rINST 7749 stmia r3, {r0-r1} @ fp[A]<- r0/r1 7750 GOTO_OPCODE(ip) @ jump to next instruction 7751 7752 7753/* ------------------------------ */ 7754 .balign 64 7755.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */ 7756/* File: armv5te/OP_IGET_OBJECT_QUICK.S */ 7757/* File: armv5te/OP_IGET_QUICK.S */ 7758 /* For: iget-quick, iget-object-quick */ 7759 /* op vA, vB, offset@CCCC */ 7760 mov r2, rINST, lsr #12 @ r2<- B 7761 GET_VREG(r3, r2) @ r3<- object we're operating on 7762 FETCH(r1, 1) @ r1<- field byte offset 7763 cmp r3, #0 @ check object for null 7764 mov r2, rINST, lsr #8 @ r2<- A(+) 7765 beq common_errNullObject @ object was null 7766 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7767 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7768 and r2, r2, #15 7769 GET_INST_OPCODE(ip) @ extract opcode from rINST 7770 SET_VREG(r0, r2) @ fp[A]<- r0 7771 GOTO_OPCODE(ip) @ jump to next instruction 7772 7773 7774 7775/* ------------------------------ */ 7776 .balign 64 7777.L_OP_IPUT_QUICK: /* 0xf5 */ 7778/* File: armv5te/OP_IPUT_QUICK.S */ 7779 /* For: iput-quick, iput-object-quick */ 7780 /* op vA, vB, offset@CCCC */ 7781 mov r2, rINST, lsr #12 @ r2<- B 7782 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7783 FETCH(r1, 1) @ r1<- field byte offset 7784 cmp r3, #0 @ check object for null 7785 mov r2, rINST, lsr #8 @ r2<- A(+) 7786 beq common_errNullObject @ object was null 7787 and r2, r2, #15 7788 GET_VREG(r0, r2) @ r0<- fp[A] 7789 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7790 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7791 GET_INST_OPCODE(ip) @ extract opcode from rINST 7792 GOTO_OPCODE(ip) @ jump to next instruction 7793 7794 7795/* ------------------------------ */ 7796 .balign 64 
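/*
 * Like OP_IGET_WIDE_QUICK above, the iput-wide-quick handler below comes
 * from the armv4t template: ARMv4T has no ldrd/strd, so the 64-bit field
 * is moved with ldmia/stmia instead.  A rough C sketch (names illustrative;
 * CCCC is the raw byte offset carried by the instruction):
 *
 *   // iput-wide-quick vA, vB, offset@CCCC
 *   char* obj = (char*)fp[B];          // object pointer from vB
 *   if (obj == NULL)
 *       goto throwNullObject;          // common_errNullObject
 *   memcpy(obj + CCCC, &fp[A], 8);     // fp[A]/fp[A+1] -> 64-bit field
 */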
7797.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7798/* File: armv4t/OP_IPUT_WIDE_QUICK.S */ 7799 /* iput-wide-quick vA, vB, offset@CCCC */ 7800 mov r0, rINST, lsr #8 @ r0<- A(+) 7801 mov r1, rINST, lsr #12 @ r1<- B 7802 and r0, r0, #15 7803 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7804 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7805 cmp r2, #0 @ check object for null 7806 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7807 beq common_errNullObject @ object was null 7808 FETCH(r3, 1) @ r3<- field byte offset 7809 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7810 add r2, r2, r3 @ r2<- object + byte offset 7811 stmia r2, {r0-r1} @ obj.field (64 bits, aligned)<- r0/r1 7812 GET_INST_OPCODE(ip) @ extract opcode from rINST 7813 GOTO_OPCODE(ip) @ jump to next instruction 7814 7815 7816/* ------------------------------ */ 7817 .balign 64 7818.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7819/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7820/* File: armv5te/OP_IPUT_QUICK.S */ 7821 /* For: iput-quick, iput-object-quick */ 7822 /* op vA, vB, offset@CCCC */ 7823 mov r2, rINST, lsr #12 @ r2<- B 7824 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7825 FETCH(r1, 1) @ r1<- field byte offset 7826 cmp r3, #0 @ check object for null 7827 mov r2, rINST, lsr #8 @ r2<- A(+) 7828 beq common_errNullObject @ object was null 7829 and r2, r2, #15 7830 GET_VREG(r0, r2) @ r0<- fp[A] 7831 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7832 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7833 GET_INST_OPCODE(ip) @ extract opcode from rINST 7834 GOTO_OPCODE(ip) @ jump to next instruction 7835 7836 7837 7838/* ------------------------------ */ 7839 .balign 64 7840.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */ 7841/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7842 /* 7843 * Handle an optimized virtual method call. 7844 * 7845 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7846 */ 7847 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7848 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7849 FETCH(r3, 2) @ r3<- FEDC or CCCC 7850 FETCH(r1, 1) @ r1<- BBBB 7851 .if (!0) 7852 and r3, r3, #15 @ r3<- C (or stays CCCC) 7853 .endif 7854 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7855 cmp r2, #0 @ is "this" null? 7856 beq common_errNullObject @ null "this", throw exception 7857 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7858 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7859 EXPORT_PC() @ invoke must export 7860 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7861 bl common_invokeMethodNoRange @ continue on 7862 7863/* ------------------------------ */ 7864 .balign 64 7865.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */ 7866/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */ 7867/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7868 /* 7869 * Handle an optimized virtual method call. 7870 * 7871 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7872 */ 7873 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7874 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7875 FETCH(r3, 2) @ r3<- FEDC or CCCC 7876 FETCH(r1, 1) @ r1<- BBBB 7877 .if (!1) 7878 and r3, r3, #15 @ r3<- C (or stays CCCC) 7879 .endif 7880 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7881 cmp r2, #0 @ is "this" null? 
7882 beq common_errNullObject @ null "this", throw exception 7883 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7884 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7885 EXPORT_PC() @ invoke must export 7886 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7887 bl common_invokeMethodRange @ continue on 7888 7889 7890/* ------------------------------ */ 7891 .balign 64 7892.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */ 7893/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7894 /* 7895 * Handle an optimized "super" method call. 7896 * 7897 * for: [opt] invoke-super-quick, invoke-super-quick/range 7898 */ 7899 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7900 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7901 FETCH(r10, 2) @ r10<- GFED or CCCC 7902 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7903 .if (!0) 7904 and r10, r10, #15 @ r10<- D (or stays CCCC) 7905 .endif 7906 FETCH(r1, 1) @ r1<- BBBB 7907 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7908 EXPORT_PC() @ must export for invoke 7909 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7910 GET_VREG(r3, r10) @ r3<- "this" 7911 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7912 cmp r3, #0 @ null "this" ref? 7913 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7914 beq common_errNullObject @ "this" is null, throw exception 7915 bl common_invokeMethodNoRange @ continue on 7916 7917 7918/* ------------------------------ */ 7919 .balign 64 7920.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */ 7921/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */ 7922/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7923 /* 7924 * Handle an optimized "super" method call. 7925 * 7926 * for: [opt] invoke-super-quick, invoke-super-quick/range 7927 */ 7928 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7929 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7930 FETCH(r10, 2) @ r10<- GFED or CCCC 7931 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7932 .if (!1) 7933 and r10, r10, #15 @ r10<- D (or stays CCCC) 7934 .endif 7935 FETCH(r1, 1) @ r1<- BBBB 7936 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7937 EXPORT_PC() @ must export for invoke 7938 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7939 GET_VREG(r3, r10) @ r3<- "this" 7940 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7941 cmp r3, #0 @ null "this" ref? 
7942 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7943 beq common_errNullObject @ "this" is null, throw exception 7944 bl common_invokeMethodRange @ continue on 7945 7946 7947 7948/* ------------------------------ */ 7949 .balign 64 7950.L_OP_UNUSED_FC: /* 0xfc */ 7951/* File: armv5te/OP_UNUSED_FC.S */ 7952/* File: armv5te/unused.S */ 7953 bl common_abort 7954 7955 7956 7957/* ------------------------------ */ 7958 .balign 64 7959.L_OP_UNUSED_FD: /* 0xfd */ 7960/* File: armv5te/OP_UNUSED_FD.S */ 7961/* File: armv5te/unused.S */ 7962 bl common_abort 7963 7964 7965 7966/* ------------------------------ */ 7967 .balign 64 7968.L_OP_UNUSED_FE: /* 0xfe */ 7969/* File: armv5te/OP_UNUSED_FE.S */ 7970/* File: armv5te/unused.S */ 7971 bl common_abort 7972 7973 7974 7975/* ------------------------------ */ 7976 .balign 64 7977.L_OP_UNUSED_FF: /* 0xff */ 7978/* File: armv5te/OP_UNUSED_FF.S */ 7979/* File: armv5te/unused.S */ 7980 bl common_abort 7981 7982 7983 7984 7985 .balign 64 7986 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 7987 .global dvmAsmInstructionEnd 7988dvmAsmInstructionEnd: 7989 7990/* 7991 * =========================================================================== 7992 * Sister implementations 7993 * =========================================================================== 7994 */ 7995 .global dvmAsmSisterStart 7996 .type dvmAsmSisterStart, %function 7997 .text 7998 .balign 4 7999dvmAsmSisterStart: 8000 8001/* continuation for OP_CONST_STRING */ 8002 8003 /* 8004 * Continuation if the String has not yet been resolved. 8005 * r1: BBBB (String ref) 8006 * r9: target register 8007 */ 8008.LOP_CONST_STRING_resolve: 8009 EXPORT_PC() 8010 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8011 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8012 bl dvmResolveString @ r0<- String reference 8013 cmp r0, #0 @ failed? 8014 beq common_exceptionThrown @ yup, handle the exception 8015 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8016 GET_INST_OPCODE(ip) @ extract opcode from rINST 8017 SET_VREG(r0, r9) @ vAA<- r0 8018 GOTO_OPCODE(ip) @ jump to next instruction 8019 8020 8021/* continuation for OP_CONST_STRING_JUMBO */ 8022 8023 /* 8024 * Continuation if the String has not yet been resolved. 8025 * r1: BBBBBBBB (String ref) 8026 * r9: target register 8027 */ 8028.LOP_CONST_STRING_JUMBO_resolve: 8029 EXPORT_PC() 8030 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8031 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8032 bl dvmResolveString @ r0<- String reference 8033 cmp r0, #0 @ failed? 8034 beq common_exceptionThrown @ yup, handle the exception 8035 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 8036 GET_INST_OPCODE(ip) @ extract opcode from rINST 8037 SET_VREG(r0, r9) @ vAA<- r0 8038 GOTO_OPCODE(ip) @ jump to next instruction 8039 8040 8041/* continuation for OP_CONST_CLASS */ 8042 8043 /* 8044 * Continuation if the Class has not yet been resolved. 8045 * r1: BBBB (Class ref) 8046 * r9: target register 8047 */ 8048.LOP_CONST_CLASS_resolve: 8049 EXPORT_PC() 8050 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8051 mov r2, #1 @ r2<- true 8052 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8053 bl dvmResolveClass @ r0<- Class reference 8054 cmp r0, #0 @ failed? 
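    /*
     * const-class slow path, roughly (sketch only; the arguments follow the
     * register setup above: referrer class in r0, BBBB in r1, "true" in r2):
     *
     *     ClassObject* clazz = dvmResolveClass(curMethod->clazz, BBBB, true);
     *     if (clazz == NULL)
     *         goto exceptionThrown;     // resolver already raised it
     *     fp[AA] = clazz;               // SET_VREG via r9, which still holds AA
     */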
8055 beq common_exceptionThrown @ yup, handle the exception 8056 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8057 GET_INST_OPCODE(ip) @ extract opcode from rINST 8058 SET_VREG(r0, r9) @ vAA<- r0 8059 GOTO_OPCODE(ip) @ jump to next instruction 8060 8061 8062/* continuation for OP_CHECK_CAST */ 8063 8064 /* 8065 * Trivial test failed, need to perform full check. This is common. 8066 * r0 holds obj->clazz 8067 * r1 holds class resolved from BBBB 8068 * r9 holds object 8069 */ 8070.LOP_CHECK_CAST_fullcheck: 8071 bl dvmInstanceofNonTrivial @ r0<- boolean result 8072 cmp r0, #0 @ failed? 8073 bne .LOP_CHECK_CAST_okay @ no, success 8074 8075 @ A cast has failed. We need to throw a ClassCastException with the 8076 @ class of the object that failed to be cast. 8077 EXPORT_PC() @ about to throw 8078 ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz 8079 ldr r0, .LstrClassCastExceptionPtr 8080 ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor 8081 bl dvmThrowExceptionWithClassMessage 8082 b common_exceptionThrown 8083 8084 /* 8085 * Resolution required. This is the least-likely path. 8086 * 8087 * r2 holds BBBB 8088 * r9 holds object 8089 */ 8090.LOP_CHECK_CAST_resolve: 8091 EXPORT_PC() @ resolve() could throw 8092 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8093 mov r1, r2 @ r1<- BBBB 8094 mov r2, #0 @ r2<- false 8095 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8096 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8097 cmp r0, #0 @ got null? 8098 beq common_exceptionThrown @ yes, handle exception 8099 mov r1, r0 @ r1<- class resolved from BBB 8100 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8101 b .LOP_CHECK_CAST_resolved @ pick up where we left off 8102 8103.LstrClassCastExceptionPtr: 8104 .word .LstrClassCastException 8105 8106 8107/* continuation for OP_INSTANCE_OF */ 8108 8109 /* 8110 * Trivial test failed, need to perform full check. This is common. 8111 * r0 holds obj->clazz 8112 * r1 holds class resolved from BBBB 8113 * r9 holds A 8114 */ 8115.LOP_INSTANCE_OF_fullcheck: 8116 bl dvmInstanceofNonTrivial @ r0<- boolean result 8117 @ fall through to OP_INSTANCE_OF_store 8118 8119 /* 8120 * r0 holds boolean result 8121 * r9 holds A 8122 */ 8123.LOP_INSTANCE_OF_store: 8124 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8125 SET_VREG(r0, r9) @ vA<- r0 8126 GET_INST_OPCODE(ip) @ extract opcode from rINST 8127 GOTO_OPCODE(ip) @ jump to next instruction 8128 8129 /* 8130 * Trivial test succeeded, save and bail. 8131 * r9 holds A 8132 */ 8133.LOP_INSTANCE_OF_trivial: 8134 mov r0, #1 @ indicate success 8135 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper 8136 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8137 SET_VREG(r0, r9) @ vA<- r0 8138 GET_INST_OPCODE(ip) @ extract opcode from rINST 8139 GOTO_OPCODE(ip) @ jump to next instruction 8140 8141 /* 8142 * Resolution required. This is the least-likely path. 8143 * 8144 * r3 holds BBBB 8145 * r9 holds A 8146 */ 8147.LOP_INSTANCE_OF_resolve: 8148 EXPORT_PC() @ resolve() could throw 8149 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8150 mov r1, r3 @ r1<- BBBB 8151 mov r2, #1 @ r2<- true 8152 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8153 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8154 cmp r0, #0 @ got null? 
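    /*
     * instance-of slow path, roughly (sketch only):
     *
     *     ClassObject* clazz = dvmResolveClass(curMethod->clazz, BBBB, true);
     *     if (clazz == NULL)
     *         goto exceptionThrown;
     *     Object* obj = (Object*) fp[B];
     *     fp[A] = (obj->clazz == clazz)
     *             ? 1
     *             : dvmInstanceofNonTrivial(obj->clazz, clazz);   // via _resolved / _fullcheck
     */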
8155 beq common_exceptionThrown @ yes, handle exception 8156 mov r1, r0 @ r1<- class resolved from BBB 8157 mov r3, rINST, lsr #12 @ r3<- B 8158 GET_VREG(r0, r3) @ r0<- vB (object) 8159 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 8160 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 8161 8162 8163/* continuation for OP_NEW_INSTANCE */ 8164 8165 .balign 32 @ minimize cache lines 8166.LOP_NEW_INSTANCE_finish: @ r0=new object 8167 mov r3, rINST, lsr #8 @ r3<- AA 8168 cmp r0, #0 @ failed? 8169 beq common_exceptionThrown @ yes, handle the exception 8170 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8171 GET_INST_OPCODE(ip) @ extract opcode from rINST 8172 SET_VREG(r0, r3) @ vAA<- r0 8173 GOTO_OPCODE(ip) @ jump to next instruction 8174 8175 /* 8176 * Class initialization required. 8177 * 8178 * r0 holds class object 8179 */ 8180.LOP_NEW_INSTANCE_needinit: 8181 mov r9, r0 @ save r0 8182 bl dvmInitClass @ initialize class 8183 cmp r0, #0 @ check boolean result 8184 mov r0, r9 @ restore r0 8185 bne .LOP_NEW_INSTANCE_initialized @ success, continue 8186 b common_exceptionThrown @ failed, deal with init exception 8187 8188 /* 8189 * Resolution required. This is the least-likely path. 8190 * 8191 * r1 holds BBBB 8192 */ 8193.LOP_NEW_INSTANCE_resolve: 8194 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8195 mov r2, #0 @ r2<- false 8196 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8197 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8198 cmp r0, #0 @ got null? 8199 bne .LOP_NEW_INSTANCE_resolved @ no, continue 8200 b common_exceptionThrown @ yes, handle exception 8201 8202.LstrInstantiationErrorPtr: 8203 .word .LstrInstantiationError 8204 8205 8206/* continuation for OP_NEW_ARRAY */ 8207 8208 8209 /* 8210 * Resolve class. (This is an uncommon case.) 8211 * 8212 * r1 holds array length 8213 * r2 holds class ref CCCC 8214 */ 8215.LOP_NEW_ARRAY_resolve: 8216 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8217 mov r9, r1 @ r9<- length (save) 8218 mov r1, r2 @ r1<- CCCC 8219 mov r2, #0 @ r2<- false 8220 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8221 bl dvmResolveClass @ r0<- call(clazz, ref) 8222 cmp r0, #0 @ got null? 8223 mov r1, r9 @ r1<- length (restore) 8224 beq common_exceptionThrown @ yes, handle exception 8225 @ fall through to OP_NEW_ARRAY_finish 8226 8227 /* 8228 * Finish allocation. 8229 * 8230 * r0 holds class 8231 * r1 holds array length 8232 */ 8233.LOP_NEW_ARRAY_finish: 8234 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 8235 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 8236 cmp r0, #0 @ failed? 8237 mov r2, rINST, lsr #8 @ r2<- A+ 8238 beq common_exceptionThrown @ yes, handle the exception 8239 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8240 and r2, r2, #15 @ r2<- A 8241 GET_INST_OPCODE(ip) @ extract opcode from rINST 8242 SET_VREG(r0, r2) @ vA<- r0 8243 GOTO_OPCODE(ip) @ jump to next instruction 8244 8245 8246/* continuation for OP_FILLED_NEW_ARRAY */ 8247 8248 /* 8249 * On entry: 8250 * r0 holds array class 8251 * r10 holds AA or BA 8252 */ 8253.LOP_FILLED_NEW_ARRAY_continue: 8254 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8255 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8256 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8257 .if 0 8258 mov r1, r10 @ r1<- AA (length) 8259 .else 8260 mov r1, r10, lsr #4 @ r1<- B (length) 8261 .endif 8262 cmp r3, #'I' @ array of ints? 8263 cmpne r3, #'L' @ array of objects? 8264 cmpne r3, #'[' @ array of arrays? 
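    /*
     * filled-new-array only handles arrays of 32-bit cells here: the second
     * character of the array class descriptor must be 'I', 'L', or '['.
     * Roughly (sketch only):
     *
     *     char t = arrayClass->descriptor[1];
     *     if (t != 'I' && t != 'L' && t != '[')
     *         goto notimpl;             // throws InternalError (see below)
     *     ArrayObject* newArray =
     *         dvmAllocArrayByClass(arrayClass, length, ALLOC_DONT_TRACK);
     *     if (newArray == NULL)
     *         goto exceptionThrown;
     */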
8265 mov r9, r1 @ save length in r9 8266 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 8267 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8268 cmp r0, #0 @ null return? 8269 beq common_exceptionThrown @ alloc failed, handle exception 8270 8271 FETCH(r1, 2) @ r1<- FEDC or CCCC 8272 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8273 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8274 subs r9, r9, #1 @ length--, check for neg 8275 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8276 bmi 2f @ was zero, bail 8277 8278 @ copy values from registers into the array 8279 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8280 .if 0 8281 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 82821: ldr r3, [r2], #4 @ r3<- *r2++ 8283 subs r9, r9, #1 @ count-- 8284 str r3, [r0], #4 @ *contents++ = vX 8285 bpl 1b 8286 @ continue at 2 8287 .else 8288 cmp r9, #4 @ length was initially 5? 8289 and r2, r10, #15 @ r2<- A 8290 bne 1f @ <= 4 args, branch 8291 GET_VREG(r3, r2) @ r3<- vA 8292 sub r9, r9, #1 @ count-- 8293 str r3, [r0, #16] @ contents[4] = vA 82941: and r2, r1, #15 @ r2<- F/E/D/C 8295 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8296 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8297 subs r9, r9, #1 @ count-- 8298 str r3, [r0], #4 @ *contents++ = vX 8299 bpl 1b 8300 @ continue at 2 8301 .endif 8302 83032: 8304 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8305 GOTO_OPCODE(ip) @ execute it 8306 8307 /* 8308 * Throw an exception indicating that we have not implemented this 8309 * mode of filled-new-array. 8310 */ 8311.LOP_FILLED_NEW_ARRAY_notimpl: 8312 ldr r0, .L_strInternalError 8313 ldr r1, .L_strFilledNewArrayNotImpl 8314 bl dvmThrowException 8315 b common_exceptionThrown 8316 8317 .if (!0) @ define in one or the other, not both 8318.L_strFilledNewArrayNotImpl: 8319 .word .LstrFilledNewArrayNotImpl 8320.L_strInternalError: 8321 .word .LstrInternalError 8322 .endif 8323 8324 8325/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8326 8327 /* 8328 * On entry: 8329 * r0 holds array class 8330 * r10 holds AA or BA 8331 */ 8332.LOP_FILLED_NEW_ARRAY_RANGE_continue: 8333 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8334 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8335 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8336 .if 1 8337 mov r1, r10 @ r1<- AA (length) 8338 .else 8339 mov r1, r10, lsr #4 @ r1<- B (length) 8340 .endif 8341 cmp r3, #'I' @ array of ints? 8342 cmpne r3, #'L' @ array of objects? 8343 cmpne r3, #'[' @ array of arrays? 8344 mov r9, r1 @ save length in r9 8345 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8346 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8347 cmp r0, #0 @ null return? 8348 beq common_exceptionThrown @ alloc failed, handle exception 8349 8350 FETCH(r1, 2) @ r1<- FEDC or CCCC 8351 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8352 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8353 subs r9, r9, #1 @ length--, check for neg 8354 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8355 bmi 2f @ was zero, bail 8356 8357 @ copy values from registers into the array 8358 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8359 .if 1 8360 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 83611: ldr r3, [r2], #4 @ r3<- *r2++ 8362 subs r9, r9, #1 @ count-- 8363 str r3, [r0], #4 @ *contents++ = vX 8364 bpl 1b 8365 @ continue at 2 8366 .else 8367 cmp r9, #4 @ length was initially 5? 
    and     r2, r10, #15                @ r2<- A
    bne     1f                          @ <= 4 args, branch
    GET_VREG(r3, r2)                    @ r3<- vA
    sub     r9, r9, #1                  @ count--
    str     r3, [r0, #16]               @ contents[4] = vA
1:  and     r2, r1, #15                 @ r2<- F/E/D/C
    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .endif

2:
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

    /*
     * Throw an exception indicating that we have not implemented this
     * mode of filled-new-array.
     */
.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
    ldr     r0, .L_strInternalError
    ldr     r1, .L_strFilledNewArrayNotImpl
    bl      dvmThrowException
    b       common_exceptionThrown

    .if     (!1)                        @ define in one or the other, not both
.L_strFilledNewArrayNotImpl:
    .word   .LstrFilledNewArrayNotImpl
.L_strInternalError:
    .word   .LstrInternalError
    .endif


/* continuation for OP_CMPL_FLOAT */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPL_FLOAT_gt_or_nan:
    mov     r1, r9                      @ reverse order
    mov     r0, r10
    bl      __aeabi_cfcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPL_FLOAT_finish
    mvn     r1, #0                      @ r1<- -1 for NaN (cmpl flavor)
    b       .LOP_CMPL_FLOAT_finish


#if 0       /* "classic" form */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpeq              @ r0<- (vBB == vCC)
    cmp     r0, #0                      @ equal?
    movne   r1, #0                      @ yes, result is 0
    bne     OP_CMPL_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmplt              @ r0<- (vBB < vCC)
    cmp     r0, #0                      @ less than?
    b       OP_CMPL_FLOAT_continue
@%break

OP_CMPL_FLOAT_continue:
    mvnne   r1, #0                      @ yes, result is -1
    bne     OP_CMPL_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpgt              @ r0<- (vBB > vCC)
    cmp     r0, #0                      @ greater than?
    beq     OP_CMPL_FLOAT_nan           @ no, must be NaN
    mov     r1, #1                      @ yes, result is 1
    @ fall through to _finish

OP_CMPL_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * This is expected to be uncommon, so we double-branch (once to here,
     * again back to _finish).
     */
OP_CMPL_FLOAT_nan:
    mvn     r1, #0                      @ r1<- -1 for NaN (cmpl flavor)
    b       OP_CMPL_FLOAT_finish

#endif


/* continuation for OP_CMPG_FLOAT */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
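    /*
     * cmpl-float and cmpg-float differ only in the NaN result: cmpl yields -1,
     * cmpg yields +1.  Roughly (sketch only):
     *
     *     if (x > y)
     *         result = 1;
     *     else if (x == y)
     *         result = 0;
     *     else if (x < y)
     *         result = -1;
     *     else                          // unordered, i.e. a NaN is involved
     *         result = (op is cmpg) ? 1 : -1;
     *
     * These _gt_or_nan tails are reached once "less than or equal" has been
     * ruled out, so only the 1 and NaN cases remain.
     */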
.LOP_CMPG_FLOAT_gt_or_nan:
    mov     r1, r9                      @ reverse order
    mov     r0, r10
    bl      __aeabi_cfcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPG_FLOAT_finish
    mov     r1, #1                      @ r1<- +1 for NaN (cmpg flavor)
    b       .LOP_CMPG_FLOAT_finish


#if 0       /* "classic" form */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpeq              @ r0<- (vBB == vCC)
    cmp     r0, #0                      @ equal?
    movne   r1, #0                      @ yes, result is 0
    bne     OP_CMPG_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmplt              @ r0<- (vBB < vCC)
    cmp     r0, #0                      @ less than?
    b       OP_CMPG_FLOAT_continue
@%break

OP_CMPG_FLOAT_continue:
    mvnne   r1, #0                      @ yes, result is -1
    bne     OP_CMPG_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpgt              @ r0<- (vBB > vCC)
    cmp     r0, #0                      @ greater than?
    beq     OP_CMPG_FLOAT_nan           @ no, must be NaN
    mov     r1, #1                      @ yes, result is 1
    @ fall through to _finish

OP_CMPG_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * This is expected to be uncommon, so we double-branch (once to here,
     * again back to _finish).
     */
OP_CMPG_FLOAT_nan:
    mov     r1, #1                      @ r1<- +1 for NaN (cmpg flavor)
    b       OP_CMPG_FLOAT_finish

#endif


/* continuation for OP_CMPL_DOUBLE */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPL_DOUBLE_gt_or_nan:
    ldmia   r10, {r0-r1}                @ reverse order
    ldmia   r9, {r2-r3}
    bl      __aeabi_cdcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPL_DOUBLE_finish
    mvn     r1, #0                      @ r1<- -1 for NaN (cmpl flavor)
    b       .LOP_CMPL_DOUBLE_finish


/* continuation for OP_CMPG_DOUBLE */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPG_DOUBLE_gt_or_nan:
    ldmia   r10, {r0-r1}                @ reverse order
    ldmia   r9, {r2-r3}
    bl      __aeabi_cdcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPG_DOUBLE_finish
    mov     r1, #1                      @ r1<- +1 for NaN (cmpg flavor)
    b       .LOP_CMPG_DOUBLE_finish


/* continuation for OP_CMP_LONG */

.LOP_CMP_LONG_less:
    mvn     r1, #0                      @ r1<- -1
    @ Want to cond code the next mov so we can avoid branch, but don't see it;
    @ instead, we just replicate the tail end.
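    /*
     * cmp-long has no NaN case.  The main handler compares the high words as
     * signed and the low words as unsigned, then lands on one of these tails.
     * Roughly (sketch only):
     *
     *     result = (vBB < vCC) ? -1 : (vBB > vCC) ? 1 : 0;   // 64-bit signed compare
     *     fp[AA] = result;                                   // r9 holds AA
     */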
8570 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8571 SET_VREG(r1, r9) @ vAA<- r1 8572 GET_INST_OPCODE(ip) @ extract opcode from rINST 8573 GOTO_OPCODE(ip) @ jump to next instruction 8574 8575.LOP_CMP_LONG_greater: 8576 mov r1, #1 @ r1<- 1 8577 @ fall through to _finish 8578 8579.LOP_CMP_LONG_finish: 8580 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8581 SET_VREG(r1, r9) @ vAA<- r1 8582 GET_INST_OPCODE(ip) @ extract opcode from rINST 8583 GOTO_OPCODE(ip) @ jump to next instruction 8584 8585 8586/* continuation for OP_AGET_WIDE */ 8587 8588.LOP_AGET_WIDE_finish: 8589 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8590 add r0, r0, #offArrayObject_contents 8591 ldmia r0, {r2-r3} @ r2/r3 <- vBB[vCC] 8592 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8593 GET_INST_OPCODE(ip) @ extract opcode from rINST 8594 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8595 GOTO_OPCODE(ip) @ jump to next instruction 8596 8597 8598/* continuation for OP_APUT_WIDE */ 8599 8600.LOP_APUT_WIDE_finish: 8601 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8602 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8603 GET_INST_OPCODE(ip) @ extract opcode from rINST 8604 add r0, #offArrayObject_contents 8605 stmia r0, {r2-r3} @ vBB[vCC] <- r2/r3 8606 GOTO_OPCODE(ip) @ jump to next instruction 8607 8608 8609/* continuation for OP_APUT_OBJECT */ 8610 /* 8611 * On entry: 8612 * r1 = vBB (arrayObj) 8613 * r9 = vAA (obj) 8614 * r10 = offset into array (vBB + vCC * width) 8615 */ 8616.LOP_APUT_OBJECT_finish: 8617 cmp r9, #0 @ storing null reference? 8618 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8619 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8620 ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz 8621 bl dvmCanPutArrayElement @ test object type vs. array type 8622 cmp r0, #0 @ okay? 
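    /*
     * aput-object store check, roughly (sketch only):
     *
     *     if (obj != NULL && !dvmCanPutArrayElement(obj->clazz, arrayObj->clazz))
     *         goto errArrayStore;       // ArrayStoreException
     *     ((Object**) array->contents)[index] = obj;   // str r9, [r10, #offArrayObject_contents]
     *
     * r10 already holds the array pointer plus the scaled index, as noted in
     * the entry comment above.
     */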
8623 beq common_errArrayStore @ no 8624.LOP_APUT_OBJECT_skip_check: 8625 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8626 GET_INST_OPCODE(ip) @ extract opcode from rINST 8627 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8628 GOTO_OPCODE(ip) @ jump to next instruction 8629 8630 8631/* continuation for OP_IGET */ 8632 8633 /* 8634 * Currently: 8635 * r0 holds resolved field 8636 * r9 holds object 8637 */ 8638.LOP_IGET_finish: 8639 @bl common_squeak0 8640 cmp r9, #0 @ check object for null 8641 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8642 beq common_errNullObject @ object was null 8643 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8644 mov r2, rINST, lsr #8 @ r2<- A+ 8645 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8646 and r2, r2, #15 @ r2<- A 8647 GET_INST_OPCODE(ip) @ extract opcode from rINST 8648 SET_VREG(r0, r2) @ fp[A]<- r0 8649 GOTO_OPCODE(ip) @ jump to next instruction 8650 8651 8652/* continuation for OP_IGET_WIDE */ 8653 8654 /* 8655 * Currently: 8656 * r0 holds resolved field 8657 * r9 holds object 8658 */ 8659.LOP_IGET_WIDE_finish: 8660 cmp r9, #0 @ check object for null 8661 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8662 beq common_errNullObject @ object was null 8663 mov r2, rINST, lsr #8 @ r2<- A+ 8664 add r9, r9, r3 @ r9<- obj + field offset 8665 ldmia r9, {r0-r1} @ r0/r1<- obj.field (64-bit align ok) 8666 and r2, r2, #15 @ r2<- A 8667 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8668 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8669 GET_INST_OPCODE(ip) @ extract opcode from rINST 8670 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8671 GOTO_OPCODE(ip) @ jump to next instruction 8672 8673 8674/* continuation for OP_IGET_OBJECT */ 8675 8676 /* 8677 * Currently: 8678 * r0 holds resolved field 8679 * r9 holds object 8680 */ 8681.LOP_IGET_OBJECT_finish: 8682 @bl common_squeak0 8683 cmp r9, #0 @ check object for null 8684 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8685 beq common_errNullObject @ object was null 8686 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8687 mov r2, rINST, lsr #8 @ r2<- A+ 8688 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8689 and r2, r2, #15 @ r2<- A 8690 GET_INST_OPCODE(ip) @ extract opcode from rINST 8691 SET_VREG(r0, r2) @ fp[A]<- r0 8692 GOTO_OPCODE(ip) @ jump to next instruction 8693 8694 8695/* continuation for OP_IGET_BOOLEAN */ 8696 8697 /* 8698 * Currently: 8699 * r0 holds resolved field 8700 * r9 holds object 8701 */ 8702.LOP_IGET_BOOLEAN_finish: 8703 @bl common_squeak1 8704 cmp r9, #0 @ check object for null 8705 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8706 beq common_errNullObject @ object was null 8707 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8708 mov r2, rINST, lsr #8 @ r2<- A+ 8709 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8710 and r2, r2, #15 @ r2<- A 8711 GET_INST_OPCODE(ip) @ extract opcode from rINST 8712 SET_VREG(r0, r2) @ fp[A]<- r0 8713 GOTO_OPCODE(ip) @ jump to next instruction 8714 8715 8716/* continuation for OP_IGET_BYTE */ 8717 8718 /* 8719 * Currently: 8720 * r0 holds resolved field 8721 * r9 holds object 8722 */ 8723.LOP_IGET_BYTE_finish: 8724 @bl common_squeak2 8725 cmp r9, #0 @ check object for null 8726 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8727 beq common_errNullObject @ object was null 8728 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8729 mov r2, rINST, lsr #8 @ r2<- A+ 8730 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8731 and r2, r2, #15 @ r2<- A 8732 
GET_INST_OPCODE(ip) @ extract opcode from rINST 8733 SET_VREG(r0, r2) @ fp[A]<- r0 8734 GOTO_OPCODE(ip) @ jump to next instruction 8735 8736 8737/* continuation for OP_IGET_CHAR */ 8738 8739 /* 8740 * Currently: 8741 * r0 holds resolved field 8742 * r9 holds object 8743 */ 8744.LOP_IGET_CHAR_finish: 8745 @bl common_squeak3 8746 cmp r9, #0 @ check object for null 8747 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8748 beq common_errNullObject @ object was null 8749 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8750 mov r2, rINST, lsr #8 @ r2<- A+ 8751 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8752 and r2, r2, #15 @ r2<- A 8753 GET_INST_OPCODE(ip) @ extract opcode from rINST 8754 SET_VREG(r0, r2) @ fp[A]<- r0 8755 GOTO_OPCODE(ip) @ jump to next instruction 8756 8757 8758/* continuation for OP_IGET_SHORT */ 8759 8760 /* 8761 * Currently: 8762 * r0 holds resolved field 8763 * r9 holds object 8764 */ 8765.LOP_IGET_SHORT_finish: 8766 @bl common_squeak4 8767 cmp r9, #0 @ check object for null 8768 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8769 beq common_errNullObject @ object was null 8770 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8771 mov r2, rINST, lsr #8 @ r2<- A+ 8772 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8773 and r2, r2, #15 @ r2<- A 8774 GET_INST_OPCODE(ip) @ extract opcode from rINST 8775 SET_VREG(r0, r2) @ fp[A]<- r0 8776 GOTO_OPCODE(ip) @ jump to next instruction 8777 8778 8779/* continuation for OP_IPUT */ 8780 8781 /* 8782 * Currently: 8783 * r0 holds resolved field 8784 * r9 holds object 8785 */ 8786.LOP_IPUT_finish: 8787 @bl common_squeak0 8788 mov r1, rINST, lsr #8 @ r1<- A+ 8789 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8790 and r1, r1, #15 @ r1<- A 8791 cmp r9, #0 @ check object for null 8792 GET_VREG(r0, r1) @ r0<- fp[A] 8793 beq common_errNullObject @ object was null 8794 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8795 GET_INST_OPCODE(ip) @ extract opcode from rINST 8796 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8797 GOTO_OPCODE(ip) @ jump to next instruction 8798 8799 8800/* continuation for OP_IPUT_WIDE */ 8801 8802 /* 8803 * Currently: 8804 * r0 holds resolved field 8805 * r9 holds object 8806 */ 8807.LOP_IPUT_WIDE_finish: 8808 mov r2, rINST, lsr #8 @ r2<- A+ 8809 cmp r9, #0 @ check object for null 8810 and r2, r2, #15 @ r2<- A 8811 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8812 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8813 beq common_errNullObject @ object was null 8814 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8815 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8816 GET_INST_OPCODE(ip) @ extract opcode from rINST 8817 add r9, r9, r3 @ r9<- object + byte offset 8818 stmia r9, {r0-r1} @ obj.field (64 bits, aligned)<- r0/r1 8819 GOTO_OPCODE(ip) @ jump to next instruction 8820 8821 8822/* continuation for OP_IPUT_OBJECT */ 8823 8824 /* 8825 * Currently: 8826 * r0 holds resolved field 8827 * r9 holds object 8828 */ 8829.LOP_IPUT_OBJECT_finish: 8830 @bl common_squeak0 8831 mov r1, rINST, lsr #8 @ r1<- A+ 8832 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8833 and r1, r1, #15 @ r1<- A 8834 cmp r9, #0 @ check object for null 8835 GET_VREG(r0, r1) @ r0<- fp[A] 8836 beq common_errNullObject @ object was null 8837 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8838 GET_INST_OPCODE(ip) @ extract opcode from rINST 8839 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8840 GOTO_OPCODE(ip) @ jump to next instruction 8841 8842 8843/* 
continuation for OP_IPUT_BOOLEAN */ 8844 8845 /* 8846 * Currently: 8847 * r0 holds resolved field 8848 * r9 holds object 8849 */ 8850.LOP_IPUT_BOOLEAN_finish: 8851 @bl common_squeak1 8852 mov r1, rINST, lsr #8 @ r1<- A+ 8853 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8854 and r1, r1, #15 @ r1<- A 8855 cmp r9, #0 @ check object for null 8856 GET_VREG(r0, r1) @ r0<- fp[A] 8857 beq common_errNullObject @ object was null 8858 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8859 GET_INST_OPCODE(ip) @ extract opcode from rINST 8860 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8861 GOTO_OPCODE(ip) @ jump to next instruction 8862 8863 8864/* continuation for OP_IPUT_BYTE */ 8865 8866 /* 8867 * Currently: 8868 * r0 holds resolved field 8869 * r9 holds object 8870 */ 8871.LOP_IPUT_BYTE_finish: 8872 @bl common_squeak2 8873 mov r1, rINST, lsr #8 @ r1<- A+ 8874 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8875 and r1, r1, #15 @ r1<- A 8876 cmp r9, #0 @ check object for null 8877 GET_VREG(r0, r1) @ r0<- fp[A] 8878 beq common_errNullObject @ object was null 8879 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8880 GET_INST_OPCODE(ip) @ extract opcode from rINST 8881 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8882 GOTO_OPCODE(ip) @ jump to next instruction 8883 8884 8885/* continuation for OP_IPUT_CHAR */ 8886 8887 /* 8888 * Currently: 8889 * r0 holds resolved field 8890 * r9 holds object 8891 */ 8892.LOP_IPUT_CHAR_finish: 8893 @bl common_squeak3 8894 mov r1, rINST, lsr #8 @ r1<- A+ 8895 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8896 and r1, r1, #15 @ r1<- A 8897 cmp r9, #0 @ check object for null 8898 GET_VREG(r0, r1) @ r0<- fp[A] 8899 beq common_errNullObject @ object was null 8900 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8901 GET_INST_OPCODE(ip) @ extract opcode from rINST 8902 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8903 GOTO_OPCODE(ip) @ jump to next instruction 8904 8905 8906/* continuation for OP_IPUT_SHORT */ 8907 8908 /* 8909 * Currently: 8910 * r0 holds resolved field 8911 * r9 holds object 8912 */ 8913.LOP_IPUT_SHORT_finish: 8914 @bl common_squeak4 8915 mov r1, rINST, lsr #8 @ r1<- A+ 8916 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8917 and r1, r1, #15 @ r1<- A 8918 cmp r9, #0 @ check object for null 8919 GET_VREG(r0, r1) @ r0<- fp[A] 8920 beq common_errNullObject @ object was null 8921 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8922 GET_INST_OPCODE(ip) @ extract opcode from rINST 8923 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8924 GOTO_OPCODE(ip) @ jump to next instruction 8925 8926 8927/* continuation for OP_SGET */ 8928 8929 /* 8930 * Continuation if the field has not yet been resolved. 8931 * r1: BBBB field ref 8932 */ 8933.LOP_SGET_resolve: 8934 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8935 EXPORT_PC() @ resolve() could throw, so export now 8936 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8937 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8938 cmp r0, #0 @ success? 8939 bne .LOP_SGET_finish @ yes, finish 8940 b common_exceptionThrown @ no, handle exception 8941 8942 8943/* continuation for OP_SGET_WIDE */ 8944 8945 /* 8946 * Continuation if the field has not yet been resolved. 
8947 * r1: BBBB field ref 8948 */ 8949.LOP_SGET_WIDE_resolve: 8950 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8951 EXPORT_PC() @ resolve() could throw, so export now 8952 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8953 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8954 cmp r0, #0 @ success? 8955 bne .LOP_SGET_WIDE_finish @ yes, finish 8956 b common_exceptionThrown @ no, handle exception 8957 8958 8959/* continuation for OP_SGET_OBJECT */ 8960 8961 /* 8962 * Continuation if the field has not yet been resolved. 8963 * r1: BBBB field ref 8964 */ 8965.LOP_SGET_OBJECT_resolve: 8966 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8967 EXPORT_PC() @ resolve() could throw, so export now 8968 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8969 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8970 cmp r0, #0 @ success? 8971 bne .LOP_SGET_OBJECT_finish @ yes, finish 8972 b common_exceptionThrown @ no, handle exception 8973 8974 8975/* continuation for OP_SGET_BOOLEAN */ 8976 8977 /* 8978 * Continuation if the field has not yet been resolved. 8979 * r1: BBBB field ref 8980 */ 8981.LOP_SGET_BOOLEAN_resolve: 8982 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8983 EXPORT_PC() @ resolve() could throw, so export now 8984 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8985 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8986 cmp r0, #0 @ success? 8987 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 8988 b common_exceptionThrown @ no, handle exception 8989 8990 8991/* continuation for OP_SGET_BYTE */ 8992 8993 /* 8994 * Continuation if the field has not yet been resolved. 8995 * r1: BBBB field ref 8996 */ 8997.LOP_SGET_BYTE_resolve: 8998 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8999 EXPORT_PC() @ resolve() could throw, so export now 9000 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9001 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9002 cmp r0, #0 @ success? 9003 bne .LOP_SGET_BYTE_finish @ yes, finish 9004 b common_exceptionThrown @ no, handle exception 9005 9006 9007/* continuation for OP_SGET_CHAR */ 9008 9009 /* 9010 * Continuation if the field has not yet been resolved. 9011 * r1: BBBB field ref 9012 */ 9013.LOP_SGET_CHAR_resolve: 9014 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9015 EXPORT_PC() @ resolve() could throw, so export now 9016 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9017 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9018 cmp r0, #0 @ success? 9019 bne .LOP_SGET_CHAR_finish @ yes, finish 9020 b common_exceptionThrown @ no, handle exception 9021 9022 9023/* continuation for OP_SGET_SHORT */ 9024 9025 /* 9026 * Continuation if the field has not yet been resolved. 9027 * r1: BBBB field ref 9028 */ 9029.LOP_SGET_SHORT_resolve: 9030 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9031 EXPORT_PC() @ resolve() could throw, so export now 9032 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9033 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9034 cmp r0, #0 @ success? 9035 bne .LOP_SGET_SHORT_finish @ yes, finish 9036 b common_exceptionThrown @ no, handle exception 9037 9038 9039/* continuation for OP_SPUT */ 9040 9041 /* 9042 * Continuation if the field has not yet been resolved. 
9043 * r1: BBBB field ref 9044 */ 9045.LOP_SPUT_resolve: 9046 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9047 EXPORT_PC() @ resolve() could throw, so export now 9048 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9049 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9050 cmp r0, #0 @ success? 9051 bne .LOP_SPUT_finish @ yes, finish 9052 b common_exceptionThrown @ no, handle exception 9053 9054 9055/* continuation for OP_SPUT_WIDE */ 9056 9057 /* 9058 * Continuation if the field has not yet been resolved. 9059 * r1: BBBB field ref 9060 * r9: &fp[AA] 9061 */ 9062.LOP_SPUT_WIDE_resolve: 9063 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9064 EXPORT_PC() @ resolve() could throw, so export now 9065 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9066 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9067 cmp r0, #0 @ success? 9068 bne .LOP_SPUT_WIDE_finish @ yes, finish 9069 b common_exceptionThrown @ no, handle exception 9070 9071 9072/* continuation for OP_SPUT_OBJECT */ 9073 9074 /* 9075 * Continuation if the field has not yet been resolved. 9076 * r1: BBBB field ref 9077 */ 9078.LOP_SPUT_OBJECT_resolve: 9079 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9080 EXPORT_PC() @ resolve() could throw, so export now 9081 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9082 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9083 cmp r0, #0 @ success? 9084 bne .LOP_SPUT_OBJECT_finish @ yes, finish 9085 b common_exceptionThrown @ no, handle exception 9086 9087 9088/* continuation for OP_SPUT_BOOLEAN */ 9089 9090 /* 9091 * Continuation if the field has not yet been resolved. 9092 * r1: BBBB field ref 9093 */ 9094.LOP_SPUT_BOOLEAN_resolve: 9095 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9096 EXPORT_PC() @ resolve() could throw, so export now 9097 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9098 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9099 cmp r0, #0 @ success? 9100 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 9101 b common_exceptionThrown @ no, handle exception 9102 9103 9104/* continuation for OP_SPUT_BYTE */ 9105 9106 /* 9107 * Continuation if the field has not yet been resolved. 9108 * r1: BBBB field ref 9109 */ 9110.LOP_SPUT_BYTE_resolve: 9111 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9112 EXPORT_PC() @ resolve() could throw, so export now 9113 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9114 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9115 cmp r0, #0 @ success? 9116 bne .LOP_SPUT_BYTE_finish @ yes, finish 9117 b common_exceptionThrown @ no, handle exception 9118 9119 9120/* continuation for OP_SPUT_CHAR */ 9121 9122 /* 9123 * Continuation if the field has not yet been resolved. 9124 * r1: BBBB field ref 9125 */ 9126.LOP_SPUT_CHAR_resolve: 9127 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9128 EXPORT_PC() @ resolve() could throw, so export now 9129 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9130 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9131 cmp r0, #0 @ success? 9132 bne .LOP_SPUT_CHAR_finish @ yes, finish 9133 b common_exceptionThrown @ no, handle exception 9134 9135 9136/* continuation for OP_SPUT_SHORT */ 9137 9138 /* 9139 * Continuation if the field has not yet been resolved. 
9140 * r1: BBBB field ref 9141 */ 9142.LOP_SPUT_SHORT_resolve: 9143 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9144 EXPORT_PC() @ resolve() could throw, so export now 9145 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9146 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9147 cmp r0, #0 @ success? 9148 bne .LOP_SPUT_SHORT_finish @ yes, finish 9149 b common_exceptionThrown @ no, handle exception 9150 9151 9152/* continuation for OP_INVOKE_VIRTUAL */ 9153 9154 /* 9155 * At this point: 9156 * r0 = resolved base method 9157 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9158 */ 9159.LOP_INVOKE_VIRTUAL_continue: 9160 GET_VREG(r1, r10) @ r1<- "this" ptr 9161 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9162 cmp r1, #0 @ is "this" null? 9163 beq common_errNullObject @ null "this", throw exception 9164 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9165 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9166 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9167 bl common_invokeMethodNoRange @ continue on 9168 9169 9170/* continuation for OP_INVOKE_SUPER */ 9171 9172 /* 9173 * At this point: 9174 * r0 = resolved base method 9175 * r9 = method->clazz 9176 */ 9177.LOP_INVOKE_SUPER_continue: 9178 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9179 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9180 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9181 EXPORT_PC() @ must export for invoke 9182 cmp r2, r3 @ compare (methodIndex, vtableCount) 9183 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass 9184 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9185 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9186 bl common_invokeMethodNoRange @ continue on 9187 9188.LOP_INVOKE_SUPER_resolve: 9189 mov r0, r9 @ r0<- method->clazz 9190 mov r2, #METHOD_VIRTUAL @ resolver method type 9191 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9192 cmp r0, #0 @ got null? 9193 bne .LOP_INVOKE_SUPER_continue @ no, continue 9194 b common_exceptionThrown @ yes, handle exception 9195 9196 /* 9197 * Throw a NoSuchMethodError with the method name as the message. 9198 * r0 = resolved base method 9199 */ 9200.LOP_INVOKE_SUPER_nsm: 9201 ldr r1, [r0, #offMethod_name] @ r1<- method name 9202 b common_errNoSuchMethod 9203 9204 9205/* continuation for OP_INVOKE_DIRECT */ 9206 9207 /* 9208 * On entry: 9209 * r1 = reference (BBBB or CCCC) 9210 * r10 = "this" register 9211 */ 9212.LOP_INVOKE_DIRECT_resolve: 9213 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9214 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9215 mov r2, #METHOD_DIRECT @ resolver method type 9216 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9217 cmp r0, #0 @ got null? 9218 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9219 bne .LOP_INVOKE_DIRECT_finish @ no, continue 9220 b common_exceptionThrown @ yes, handle exception 9221 9222 9223/* continuation for OP_INVOKE_VIRTUAL_RANGE */ 9224 9225 /* 9226 * At this point: 9227 * r0 = resolved base method 9228 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9229 */ 9230.LOP_INVOKE_VIRTUAL_RANGE_continue: 9231 GET_VREG(r1, r10) @ r1<- "this" ptr 9232 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9233 cmp r1, #0 @ is "this" null? 
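    /*
     * Non-quick virtual dispatch: the vtable slot comes from the resolved
     * base method rather than from the instruction stream.  Roughly
     * (sketch only):
     *
     *     if (thisPtr == NULL)
     *         goto errNullObject;
     *     Method* meth = thisPtr->clazz->vtable[baseMethod->methodIndex];
     *     invoke(meth);
     */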
9234 beq common_errNullObject @ null "this", throw exception 9235 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9236 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9237 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9238 bl common_invokeMethodRange @ continue on 9239 9240 9241/* continuation for OP_INVOKE_SUPER_RANGE */ 9242 9243 /* 9244 * At this point: 9245 * r0 = resolved base method 9246 * r9 = method->clazz 9247 */ 9248.LOP_INVOKE_SUPER_RANGE_continue: 9249 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9250 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9251 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9252 EXPORT_PC() @ must export for invoke 9253 cmp r2, r3 @ compare (methodIndex, vtableCount) 9254 bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass 9255 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9256 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9257 bl common_invokeMethodRange @ continue on 9258 9259.LOP_INVOKE_SUPER_RANGE_resolve: 9260 mov r0, r9 @ r0<- method->clazz 9261 mov r2, #METHOD_VIRTUAL @ resolver method type 9262 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9263 cmp r0, #0 @ got null? 9264 bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue 9265 b common_exceptionThrown @ yes, handle exception 9266 9267 /* 9268 * Throw a NoSuchMethodError with the method name as the message. 9269 * r0 = resolved base method 9270 */ 9271.LOP_INVOKE_SUPER_RANGE_nsm: 9272 ldr r1, [r0, #offMethod_name] @ r1<- method name 9273 b common_errNoSuchMethod 9274 9275 9276/* continuation for OP_INVOKE_DIRECT_RANGE */ 9277 9278 /* 9279 * On entry: 9280 * r1 = reference (BBBB or CCCC) 9281 * r10 = "this" register 9282 */ 9283.LOP_INVOKE_DIRECT_RANGE_resolve: 9284 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9285 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9286 mov r2, #METHOD_DIRECT @ resolver method type 9287 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9288 cmp r0, #0 @ got null? 9289 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9290 bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue 9291 b common_exceptionThrown @ yes, handle exception 9292 9293 9294/* continuation for OP_FLOAT_TO_LONG */ 9295/* 9296 * Convert the float in r0 to a long in r0/r1. 9297 * 9298 * We have to clip values to long min/max per the specification. The 9299 * expected common case is a "reasonable" value that converts directly 9300 * to modest integer. The EABI convert function isn't doing this for us. 9301 */ 9302f2l_doconv: 9303 stmfd sp!, {r4, lr} 9304 mov r1, #0x5f000000 @ (float)maxlong 9305 mov r4, r0 9306 bl __aeabi_fcmpge @ is arg >= maxlong? 9307 cmp r0, #0 @ nonzero == yes 9308 mvnne r0, #0 @ return maxlong (7fffffff) 9309 mvnne r1, #0x80000000 9310 ldmnefd sp!, {r4, pc} 9311 9312 mov r0, r4 @ recover arg 9313 mov r1, #0xdf000000 @ (float)minlong 9314 bl __aeabi_fcmple @ is arg <= minlong? 9315 cmp r0, #0 @ nonzero == yes 9316 movne r0, #0 @ return minlong (80000000) 9317 movne r1, #0x80000000 9318 ldmnefd sp!, {r4, pc} 9319 9320 mov r0, r4 @ recover arg 9321 mov r1, r4 9322 bl __aeabi_fcmpeq @ is arg == self? 9323 cmp r0, #0 @ zero == no 9324 moveq r1, #0 @ return zero for NaN 9325 ldmeqfd sp!, {r4, pc} 9326 9327 mov r0, r4 @ recover arg 9328 bl __aeabi_f2lz @ convert float to long 9329 ldmfd sp!, {r4, pc} 9330 9331 9332/* continuation for OP_DOUBLE_TO_LONG */ 9333/* 9334 * Convert the double in r0/r1 to a long in r0/r1. 
9335 * 9336 * We have to clip values to long min/max per the specification. The 9337 * expected common case is a "reasonable" value that converts directly 9338 * to modest integer. The EABI convert function isn't doing this for us. 9339 */ 9340d2l_doconv: 9341 stmfd sp!, {r4, r5, lr} @ save regs 9342 ldr r3, .LOP_DOUBLE_TO_LONG_max @ (double)maxlong, hi 9343 sub sp, sp, #4 @ align for EABI 9344 mov r2, #0 @ (double)maxlong, lo 9345 mov r4, r0 @ save r0 9346 mov r5, r1 @ and r1 9347 bl __aeabi_dcmpge @ is arg >= maxlong? 9348 cmp r0, #0 @ nonzero == yes 9349 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 9350 mvnne r1, #0x80000000 9351 bne 1f 9352 9353 mov r0, r4 @ recover arg 9354 mov r1, r5 9355 ldr r3, .LOP_DOUBLE_TO_LONG_min @ (double)minlong, hi 9356 mov r2, #0 @ (double)minlong, lo 9357 bl __aeabi_dcmple @ is arg <= minlong? 9358 cmp r0, #0 @ nonzero == yes 9359 movne r0, #0 @ return minlong (8000000000000000) 9360 movne r1, #0x80000000 9361 bne 1f 9362 9363 mov r0, r4 @ recover arg 9364 mov r1, r5 9365 mov r2, r4 @ compare against self 9366 mov r3, r5 9367 bl __aeabi_dcmpeq @ is arg == self? 9368 cmp r0, #0 @ zero == no 9369 moveq r1, #0 @ return zero for NaN 9370 beq 1f 9371 9372 mov r0, r4 @ recover arg 9373 mov r1, r5 9374 bl __aeabi_d2lz @ convert double to long 9375 93761: 9377 add sp, sp, #4 9378 ldmfd sp!, {r4, r5, pc} 9379 9380.LOP_DOUBLE_TO_LONG_max: 9381 .word 0x43e00000 @ maxlong, as a double (high word) 9382.LOP_DOUBLE_TO_LONG_min: 9383 .word 0xc3e00000 @ minlong, as a double (high word) 9384 9385 9386/* continuation for OP_MUL_LONG */ 9387 9388.LOP_MUL_LONG_finish: 9389 GET_INST_OPCODE(ip) @ extract opcode from rINST 9390 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 9391 GOTO_OPCODE(ip) @ jump to next instruction 9392 9393 9394/* continuation for OP_SHL_LONG */ 9395 9396.LOP_SHL_LONG_finish: 9397 mov r0, r0, asl r2 @ r0<- r0 << r2 9398 GET_INST_OPCODE(ip) @ extract opcode from rINST 9399 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9400 GOTO_OPCODE(ip) @ jump to next instruction 9401 9402 9403/* continuation for OP_SHR_LONG */ 9404 9405.LOP_SHR_LONG_finish: 9406 mov r1, r1, asr r2 @ r1<- r1 >> r2 9407 GET_INST_OPCODE(ip) @ extract opcode from rINST 9408 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9409 GOTO_OPCODE(ip) @ jump to next instruction 9410 9411 9412/* continuation for OP_USHR_LONG */ 9413 9414.LOP_USHR_LONG_finish: 9415 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 9416 GET_INST_OPCODE(ip) @ extract opcode from rINST 9417 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9418 GOTO_OPCODE(ip) @ jump to next instruction 9419 9420 9421/* continuation for OP_SHL_LONG_2ADDR */ 9422 9423.LOP_SHL_LONG_2ADDR_finish: 9424 GET_INST_OPCODE(ip) @ extract opcode from rINST 9425 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9426 GOTO_OPCODE(ip) @ jump to next instruction 9427 9428 9429/* continuation for OP_SHR_LONG_2ADDR */ 9430 9431.LOP_SHR_LONG_2ADDR_finish: 9432 GET_INST_OPCODE(ip) @ extract opcode from rINST 9433 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9434 GOTO_OPCODE(ip) @ jump to next instruction 9435 9436 9437/* continuation for OP_USHR_LONG_2ADDR */ 9438 9439.LOP_USHR_LONG_2ADDR_finish: 9440 GET_INST_OPCODE(ip) @ extract opcode from rINST 9441 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9442 GOTO_OPCODE(ip) @ jump to next instruction 9443 9444 9445/* continuation for OP_EXECUTE_INLINE */ 9446 9447 /* 9448 * Extract args, call function. 
 *  r0 = #of args (0-4)
 *  r10 = call index
 *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
 *
 * Other ideas:
 * - Use a jump table from the main piece to jump directly into the
 *   AND/LDR pairs.  Costs a data load, saves a branch.
 * - Have five separate pieces that do the loading, so we can work the
 *   interleave a little better.  Increases code size.
 */
.LOP_EXECUTE_INLINE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- FEDC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  and     ip, r9, #0xf000             @ isolate F
    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vF (shift right 12, left 2)
3:  and     ip, r9, #0x0f00             @ isolate E
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vE
2:  and     ip, r9, #0x00f0             @ isolate D
    ldr     r1, [rFP, ip, lsr #2]       @ r1<- vD
1:  and     ip, r9, #0x000f             @ isolate C
    ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
0:
    ldr     r9, .LOP_EXECUTE_INLINE_table   @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
                                        @ (not reached)

.LOP_EXECUTE_INLINE_table:
    .word   gDvmInlineOpsTable


    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
    .global dvmAsmSisterEnd
dvmAsmSisterEnd:

/* File: armv5te/footer.S */

/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */



    .text
    .align  2

#if defined(WITH_JIT)
/*
 * Return from the translation cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction.  We have to skip
 * the code cache lookup; otherwise it is possible to bounce indefinitely
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    mov    rPC, r0
#ifdef EXIT_STATS
    mov    r0,lr
    bl     dvmBumpPunt
#endif
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *    r0 <= PC
 *    r1 <= PC of resume instruction
 *    lr <= resume point in translation
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResume]
    str    r1,[rGLUE,#offGlue_jitResumePC]
    mov    r1,#kInterpEntryInstr
    @ enum is 4 byte in aapcs-EABI
    str    r1, [rGLUE, #offGlue_entryPoint]
    mov    rPC,r0
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    mov    r2,#kJitSingleStep    @ Ask for single step and then revert
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r1,#1                 @ set changeInterp to bail to debug interp
    b      common_gotoBail


/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.
 */
    .global dvmJitToTraceSelect
dvmJitToTraceSelect:
    ldr    rPC,[r14, #-1]           @ get our target PC
    add    rINST,r14,#-5            @ save start of chain branch
    mov    r0,rPC
    bl     dvmJitGetCodeAddr        @ Is there a translation?
    cmp    r0,#0
    beq    2f
    mov    r1,rINST
    bl     dvmJitChain              @ r0<- dvmJitChain(codeAddr,chainAddr)
    cmp    r0,#0                    @ successful chain?
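    /*
     * Chain-or-bail decision, roughly (sketch only):
     *
     *     void* code = dvmJitGetCodeAddr(targetPC);
     *     if (code != NULL && (code = dvmJitChain(code, chainBranchAddr)) != NULL)
     *         jump to code;             // bxne below, stays in native code
     *     else
     *         resume interpreting (and possibly request a new trace);
     */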
9558 bxne r0 @ continue native execution 9559 b toInterpreter @ didn't chain - resume with interpreter 9560 9561/* No translation, so request one if profiling isn't disabled*/ 95622: 9563 adrl rIBASE, dvmAsmInstructionStart 9564 GET_JIT_PROF_TABLE(r0) 9565 FETCH_INST() 9566 cmp r0, #0 9567 bne common_selectTrace 9568 GET_INST_OPCODE(ip) 9569 GOTO_OPCODE(ip) 9570 9571/* 9572 * Return from the translation cache to the interpreter. 9573 * The return was done with a BLX from thumb mode, and 9574 * the following 32-bit word contains the target rPC value. 9575 * Note that lr (r14) will have its low-order bit set to denote 9576 * its thumb-mode origin. 9577 * 9578 * We'll need to stash our lr origin away, recover the new 9579 * target and then check to see if there is a translation available 9580 * for our new target. If so, we do a translation chain and 9581 * go back to native execution. Otherwise, it's back to the 9582 * interpreter (after treating this entry as a potential 9583 * trace start). 9584 */ 9585 .global dvmJitToInterpNormal 9586dvmJitToInterpNormal: 9587 ldr rPC,[r14, #-1] @ get our target PC 9588 add rINST,r14,#-5 @ save start of chain branch 9589#ifdef EXIT_STATS 9590 bl dvmBumpNormal 9591#endif 9592 mov r0,rPC 9593 bl dvmJitGetCodeAddr @ Is there a translation? 9594 cmp r0,#0 9595 beq toInterpreter @ go if not, otherwise do chain 9596 mov r1,rINST 9597 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9598 cmp r0,#0 @ successful chain? 9599 bxne r0 @ continue native execution 9600 b toInterpreter @ didn't chain - resume with interpreter 9601 9602/* 9603 * Return from the translation cache to the interpreter to do method invocation. 9604 * Check if translation exists for the callee, but don't chain to it. 9605 */ 9606 .global dvmJitToInterpNoChain 9607dvmJitToInterpNoChain: 9608#ifdef EXIT_STATS 9609 bl dvmBumpNoChain 9610#endif 9611 mov r0,rPC 9612 bl dvmJitGetCodeAddr @ Is there a translation? 9613 cmp r0,#0 9614 bxne r0 @ continue native execution if so 9615 9616/* 9617 * No translation, restore interpreter regs and start interpreting. 9618 * rGLUE & rFP were preserved in the translated code, and rPC has 9619 * already been restored by the time we get here. We'll need to set 9620 * up rIBASE & rINST, and load the address of the JitTable into r0. 9621 */ 9622toInterpreter: 9623 EXPORT_PC() 9624 adrl rIBASE, dvmAsmInstructionStart 9625 FETCH_INST() 9626 GET_JIT_PROF_TABLE(r0) 9627 @ NOTE: intended fallthrough 9628/* 9629 * Common code to update potential trace start counter, and initiate 9630 * a trace-build if appropriate. On entry, rPC should point to the 9631 * next instruction to execute, and rINST should be already loaded with 9632 * the next opcode word, and r0 holds a pointer to the jit profile 9633 * table (pJitProfTable). 9634 */ 9635common_testUpdateProfile: 9636 cmp r0,#0 9637 GET_INST_OPCODE(ip) 9638 GOTO_OPCODE_IFEQ(ip) @ if not profiling, fallthrough otherwise */ 9639 9640common_updateProfile: 9641 eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function 9642 lsl r3,r3,#20 @ shift out excess 4095 9643 ldrb r1,[r0,r3,lsr #20] @ get counter 9644 GET_INST_OPCODE(ip) 9645 subs r1,r1,#1 @ decrement counter 9646 strb r1,[r0,r3,lsr #20] @ and store it 9647 GOTO_OPCODE_IFNE(ip) @ if not threshold, fallthrough otherwise */ 9648 9649/* 9650 * Here, we switch to the debug interpreter to request 9651 * trace selection. First, though, check to see if there 9652 * is already a native translation in place (and, if so, 9653 * jump to it now). 
9654 */ 9655 mov r1,#255 9656 strb r1,[r0,r3,lsr #20] @ reset counter 9657 EXPORT_PC() 9658 mov r0,rPC 9659 bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC) 9660 cmp r0,#0 9661 beq common_selectTrace 9662 bxne r0 @ jump to the translation 9663common_selectTrace: 9664 mov r2,#kJitTSelectRequest @ ask for trace selection 9665 str r2,[rGLUE,#offGlue_jitState] 9666 mov r1,#1 @ set changeInterp 9667 b common_gotoBail 9668 9669#endif 9670 9671/* 9672 * Common code when a backward branch is taken. 9673 * 9674 * On entry: 9675 * r9 is PC adjustment *in bytes* 9676 */ 9677common_backwardBranch: 9678 mov r0, #kInterpEntryInstr 9679 bl common_periodicChecks 9680#if defined(WITH_JIT) 9681 GET_JIT_PROF_TABLE(r0) 9682 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9683 cmp r0,#0 9684 bne common_updateProfile 9685 GET_INST_OPCODE(ip) 9686 GOTO_OPCODE(ip) 9687#else 9688 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9689 GET_INST_OPCODE(ip) @ extract opcode from rINST 9690 GOTO_OPCODE(ip) @ jump to next instruction 9691#endif 9692 9693 9694/* 9695 * Need to see if the thread needs to be suspended or debugger/profiler 9696 * activity has begun. 9697 * 9698 * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't 9699 * have to do the second ldr. 9700 * 9701 * TODO: reduce this so we're just checking a single location. 9702 * 9703 * On entry: 9704 * r0 is reentry type, e.g. kInterpEntryInstr 9705 * r9 is trampoline PC adjustment *in bytes* 9706 */ 9707common_periodicChecks: 9708 ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount 9709 9710#if defined(WITH_DEBUGGER) 9711 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 9712#endif 9713#if defined(WITH_PROFILER) 9714 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 9715#endif 9716 9717 ldr r3, [r3] @ r3<- suspendCount (int) 9718 9719#if defined(WITH_DEBUGGER) 9720 ldrb r1, [r1] @ r1<- debuggerActive (boolean) 9721#endif 9722#if defined (WITH_PROFILER) 9723 ldr r2, [r2] @ r2<- activeProfilers (int) 9724#endif 9725 9726 cmp r3, #0 @ suspend pending? 9727 bne 2f @ yes, do full suspension check 9728 9729#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER) 9730# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER) 9731 orrs r1, r1, r2 @ r1<- r1 | r2 9732 cmp r1, #0 @ debugger attached or profiler started? 9733# elif defined(WITH_DEBUGGER) 9734 cmp r1, #0 @ debugger attached? 9735# elif defined(WITH_PROFILER) 9736 cmp r2, #0 @ profiler started? 9737# endif 9738 bne 3f @ debugger/profiler, switch interp 9739#endif 9740 9741 bx lr @ nothing to do, return 9742 97432: @ check suspend 9744 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 9745 EXPORT_PC() @ need for precise GC 9746 b dvmCheckSuspendPending @ suspend if necessary, then return 9747 97483: @ debugger/profiler enabled, bail out 9749 add rPC, rPC, r9 @ update rPC 9750 str r0, [rGLUE, #offGlue_entryPoint] 9751 mov r1, #1 @ "want switch" = true 9752 b common_gotoBail 9753 9754 9755/* 9756 * The equivalent of "goto bail", this calls through the "bail handler". 9757 * 9758 * State registers will be saved to the "glue" area before bailing. 
/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 *
 * State registers will be saved to the "glue" area before bailing.
 *
 * On entry:
 *    r1 is "bool changeInterp", indicating if we want to switch to the
 *    other interpreter or just bail all the way out
 */
common_gotoBail:
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r0, rGLUE                   @ r0<- glue ptr
    b       dvmMterpStdBail             @ call(glue, changeInterp)

    @add    r1, r1, #1                  @ using (boolean+1)
    @add    r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
    @bl     _longjmp                    @ does not return
    @bl     common_abort


/*
 * Common code for method invocation with range.
 *
 * On entry:
 *    r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodRange:
.LinvokeNewRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC

    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
    @ (very few methods have > 10 args; could unroll for common cases)
    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
1:  ldr     r1, [r3], #4                @ val = *fp++
    subs    r2, r2, #1                  @ count--
    str     r1, [r10], #4               @ *outs++ = val
    bne     1b                          @ ...while count != 0
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    b       .LinvokeArgsDone

/*
 * Common code for method invocation without range.
 *
 * On entry:
 *    r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]       @ r3<- methodToCall->outsSize
    beq     .LinvokeArgsDone

    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
.LinvokeNonRange:
    rsb     r2, r2, #5                  @ r2<- 5-r2
    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
5:  and     ip, rINST, #0x0f00          @ isolate A
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vA (shift right 8, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vA
4:  and     ip, r1, #0xf000             @ isolate G
    ldr     r2, [rFP, ip, lsr #10]      @ r2<- vG (shift right 12, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vG
3:  and     ip, r1, #0x0f00             @ isolate F
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vF
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vF
2:  and     ip, r1, #0x00f0             @ isolate E
    ldr     r2, [rFP, ip, lsr #2]       @ r2<- vE
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vE
1:  and     ip, r1, #0x000f             @ isolate D
    ldr     r2, [rFP, ip, lsl #2]       @ r2<- vD
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

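/*
 * Sketch of what the computed goto above accomplishes, in rough C (not part
 * of the build; "count", "vA" and "regGFED" are illustrative names for the
 * values held in r2, rINST[11:8] and the second code unit):
 *
 *     u4 *outs = (u4 *) SAVEAREA_FROM_FP(fp);
 *     switch (count) {                      // deliberate fall-through, as in the asm
 *     case 5: *--outs = fp[vA];
 *     case 4: *--outs = fp[(regGFED >> 12) & 0xf];
 *     case 3: *--outs = fp[(regGFED >>  8) & 0xf];
 *     case 2: *--outs = fp[(regGFED >>  4) & 0xf];
 *     case 1: *--outs = fp[ regGFED        & 0xf];
 *     case 0: break;
 *     }
 *
 * Each case is padded to exactly four instructions (16 bytes) so that the
 * single "add pc, pc, (5-count), lsl #4" lands on the right entry.
 */
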
.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]      @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]   @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
@    bl     common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                      @ bottom < interpStackEnd?
    ldr     r3, [r0, #offMethod_accessFlags]    @ r3<- methodToCall->accessFlags
    blt     .LstackOverflow             @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE
    bne     .LinvokeNative

    /*
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                    @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex]    @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                     @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]  @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex]    @ glue->methodClassDex = ...
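/*
 * For reference, the frame construction and overflow check above correspond
 * roughly to this C (sketch only, not part of the build; the u4 slot math
 * mirrors the lsl #2 scaling used in the code):
 *
 *     u4 *newFp = (u4 *) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
 *     StackSaveArea *newSaveArea = SAVEAREA_FROM_FP(newFp);
 *     u1 *bottom = (u1 *) newSaveArea - methodToCall->outsSize * sizeof(u4);
 *     if (bottom < glue->interpStackEnd)
 *         goto stackOverflow;
 *     newSaveArea->prevFrame = fp;
 *     newSaveArea->savedPc = pc;
 *     newSaveArea->method = methodToCall;
 */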
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    mov     rFP, r1                     @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)       @ extract prefetched opcode from r9
    mov     rINST, r9                   @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    cmp     r0, #0
    bne     common_updateProfile
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    mov     rFP, r1                     @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)       @ extract prefetched opcode from r9
    mov     rINST, r9                   @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    ldr     r9, [r3, #offThread_jniLocal_nextEntry]     @ r9<- thread->refNext
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefTop]    @ newFp->localRefTop = refNext
    mov     r9, r3                      @ r9<- glue->self (preserve)

    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFp (points to args)
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

    @mov    lr, pc                      @ set return addr
    @ldr    pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    LDR_PC_LR "[r2, #offMethod_nativeFunc]"

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefTop]    @ r0<- newSave->localRefTop
    ldr     r1, [r9, #offThread_exception]  @ check for exception
    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [r9, #offThread_jniLocal_nextEntry]     @ self->refNext<- r0
    bne     common_exceptionThrown      @ no, handle exception

    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LstackOverflow:
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
    bl      dvmHandleStackOverflow
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
#endif


    /*
     * Common code for method invocation, calling through "glue code".
     *
     * TODO: now that we have range and non-range invoke handlers, this
     * needs to be split into two.  Maybe just create entry points
     * that set r9 and jump here?
     *
     * On entry:
     *    r0 is "Method* methodToCall", the method we're trying to call
     *    r9 is "bool methodCallRange", indicating if this is a /range variant
     */
    .if     0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rGLUE                   @ A0<- glue
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
    .endif

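/*
 * Rough C outline of the native-invoke path above (sketch only, not part of
 * the build; the four-argument bridge signature is inferred from the register
 * setup in .LinvokeNative, and "refNext" follows the naming in the comments
 * there):
 *
 *     newSaveArea->localRefTop = self->refNext;   // remember JNI local refs
 *     self->curFrame = newFp;
 *     (*methodToCall->nativeFunc)(newFp, &glue->retval, methodToCall, self);
 *     self->refNext = newSaveArea->localRefTop;   // "dvmPopJniLocals"
 *     self->curFrame = fp;                        // back to the caller's frame
 *     if (self->exception != NULL)
 *         goto exceptionThrown;
 */
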
/*
 * Common code for handling a return instruction.
 *
 * This does not return.
 */
common_returnFromMethod:
.LreturnNew:
    mov     r0, #kInterpEntryReturn
    mov     r9, #0
    bl      common_periodicChecks

    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     rFP, [r0, #offStackSaveArea_prevFrame]  @ fp = saveArea->prevFrame
    ldr     r9, [r0, #offStackSaveArea_savedPc]     @ r9 = saveArea->savedPc
    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ is this a break frame?
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    mov     r1, #0                      @ "want switch" = false
    beq     common_gotoBail             @ break frame, bail out completely

    PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
    str     r2, [rGLUE, #offGlue_method]    @ glue->method = newSave->method
    ldr     r1, [r10, #offClassObject_pDvmDex]  @ r1<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
#if defined(WITH_JIT)
    ldr     r3, [r0, #offStackSaveArea_returnAddr]  @ r3 = saveArea->returnAddr
    GET_JIT_PROF_TABLE(r0)
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    cmp     r3, #0                      @ caller is compiled code
    blxne   r3
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     r0, #0
    bne     common_updateProfile
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /*
     * Return handling, calls through "glue code".
     */
    .if     0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif

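/*
 * The frame pop above is roughly the following C (sketch only, not part of
 * the build):
 *
 *     StackSaveArea *saveArea = SAVEAREA_FROM_FP(fp);
 *     fp = saveArea->prevFrame;
 *     const u2 *pc = saveArea->savedPc + 3;       // step past the invoke (3 code units)
 *     const Method *method = SAVEAREA_FROM_FP(fp)->method;
 *     if (method == NULL)
 *         goto bail;                              // break frame: leave the interpreter
 *     glue->method = method;
 *     glue->methodClassDex = method->clazz->pDvmDex;
 *     self->curFrame = fp;
 */
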
/*
 * Somebody has thrown an exception.  Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
    .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow
    mov     r9, #0
    bl      common_periodicChecks

#if defined(WITH_JIT)
    mov     r2, #kJitTSelectAbort       @ abandon trace selection in progress
    str     r2, [rGLUE, #offGlue_jitState]
#endif

    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10                     @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    mov     r3, #0                      @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false
    str     ip, [sp, #-4]!              @ *--sp = &fp
    ldr     r1, [rGLUE, #offGlue_method]    @ r1<- glue->method
    mov     r0, r10                     @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ save relPc result in rFP
    mov     r0, r10                     @ r0<- self
    bl      dvmCleanupStackOverflow     @ call(self)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack
    bmi     .LnotCaughtLocally

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method]  @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]  @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]  @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex]   @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1         @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex]    @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

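/*
 * Rough C outline of the catch dispatch above (sketch only, not part of the
 * build):
 *
 *     int relPc = pc - method->insns;              // in 16-bit code units
 *     int catchRelPc = dvmFindCatchBlock(self, relPc, exception,
 *                                        false, &fp);
 *     if (catchRelPc < 0)
 *         goto notCaughtLocally;                   // unwind out of this method
 *     method = SAVEAREA_FROM_FP(fp)->method;       // fp may have changed
 *     pc = method->insns + catchRelPc;
 *     // self->exception is restored only if the handler starts with a
 *     // move-exception instruction.
 */
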
.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, r10                     @ if yes: r0<- self
    blne    dvmCleanupStackOverflow     @ if yes: call(self)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
#endif
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    mov     r1, #0                      @ "want switch" = false
    b       common_gotoBail             @ bail out


    /*
     * Exception handling, calls through "glue code".
     */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Invalid array index.
 */
common_errArrayIndex:
    EXPORT_PC()
    ldr     r0, strArrayIndexException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invalid array value.
 */
common_errArrayStore:
    EXPORT_PC()
    ldr     r0, strArrayStoreException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Integer divide or mod by zero.
 */
common_errDivideByZero:
    EXPORT_PC()
    ldr     r0, strArithmeticException
    ldr     r1, strDivideByZero
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Attempt to allocate an array with a negative size.
 */
common_errNegativeArraySize:
    EXPORT_PC()
    ldr     r0, strNegativeArraySizeException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invocation of a non-existent method.
 */
common_errNoSuchMethod:
    EXPORT_PC()
    ldr     r0, strNoSuchMethodError
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * We encountered a null object when we weren't expecting one.  We
 * export the PC, throw a NullPointerException, and goto the exception
 * processing code.
 */
common_errNullObject:
    EXPORT_PC()
    ldr     r0, strNullPointerException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * For debugging, cause an immediate fault.  The source address will
 * be in lr (use a bl instruction to jump here).
 */
common_abort:
    ldr     pc, .LdeadFood
.LdeadFood:
    .word   0xdeadf00d

/*
 * Spit out a "we were here", preserving all registers.  (The attempt
 * to save ip won't work, but we need to save an even number of
 * registers for EABI 64-bit stack alignment.)
 */
    .macro  SQUEAK num
common_squeak\num:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strSqueak
    mov     r1, #\num
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endm

    SQUEAK  0
    SQUEAK  1
    SQUEAK  2
    SQUEAK  3
    SQUEAK  4
    SQUEAK  5

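/*
 * All of the common_errXxx handlers above follow the same shape; in rough C
 * (sketch only, not part of the build):
 *
 *     EXPORT_PC();                             // exception needs a precise PC
 *     dvmThrowException("Ljava/lang/NullPointerException;", NULL);
 *     goto exceptionThrown;                    // i.e. common_exceptionThrown
 *
 * Only common_errDivideByZero passes a message string ("divide by zero");
 * the others pass NULL.
 */
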
/*
 * Spit out the number in r0, preserving registers.
 */
common_printNum:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strSqueak
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print a newline, preserving registers.
 */
common_printNewline:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strNewline
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print the 32-bit quantity in r0 as a hex value, preserving registers.
 */
common_printHex:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strPrintHex
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print the 64-bit quantity in r0-r1, preserving registers.
 */
common_printLong:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r3, r1
    mov     r2, r0
    ldr     r0, strPrintLong
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print full method info.  Pass the Method* in r0.  Preserves regs.
 */
common_printMethod:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpPrintMethod
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Call a C helper function that dumps regs and possibly some
 * additional info.  Requires the C function to be compiled in.
 */
    .if     0
common_dumpRegs:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpDumpArmRegs
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endif

#if 0
/*
 * Experiment on VFP mode.
 *
 * uint32_t setFPSCR(uint32_t val, uint32_t mask)
 *
 * Updates the bits specified by "mask", setting them to the values in "val".
 */
setFPSCR:
    and     r0, r0, r1                  @ make sure no stray bits are set
    fmrx    r2, fpscr                   @ get VFP reg
    mvn     r1, r1                      @ bit-invert mask
    and     r2, r2, r1                  @ clear masked bits
    orr     r2, r2, r0                  @ set specified bits
    fmxr    fpscr, r2                   @ set VFP reg
    mov     r0, r2                      @ return new value
    bx      lr

    .align  2
    .global dvmConfigureFP
    .type   dvmConfigureFP, %function
dvmConfigureFP:
    stmfd   sp!, {ip, lr}
    /* 0x03000000 sets DN/FZ */
    /* 0x00009f00 clears the six exception enable flags */
    bl      common_squeak0
    mov     r0, #0x03000000             @ r0<- 0x03000000
    add     r1, r0, #0x9f00             @ r1<- 0x03009f00
    bl      setFPSCR
    ldmfd   sp!, {ip, pc}
#endif

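/*
 * For reference, the disabled setFPSCR experiment above is roughly this C
 * (sketch only; "readFpscr"/"writeFpscr" stand in for the fmrx/fmxr
 * instructions and are not real helpers in this code base):
 *
 *     uint32_t setFPSCR(uint32_t val, uint32_t mask)
 *     {
 *         uint32_t fpscr = readFpscr();
 *         fpscr = (fpscr & ~mask) | (val & mask);
 *         writeFpscr(fpscr);
 *         return fpscr;
 *     }
 *
 * dvmConfigureFP then calls it with val=0x03000000 (DN/FZ) and
 * mask=0x03009f00, which also clears the six exception-enable flags.
 */
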
/*
 * String references, must be close to the code that uses them.
 */
    .align  2
strArithmeticException:
    .word   .LstrArithmeticException
strArrayIndexException:
    .word   .LstrArrayIndexException
strArrayStoreException:
    .word   .LstrArrayStoreException
strDivideByZero:
    .word   .LstrDivideByZero
strNegativeArraySizeException:
    .word   .LstrNegativeArraySizeException
strNoSuchMethodError:
    .word   .LstrNoSuchMethodError
strNullPointerException:
    .word   .LstrNullPointerException

strLogTag:
    .word   .LstrLogTag
strExceptionNotCaughtLocally:
    .word   .LstrExceptionNotCaughtLocally

strNewline:
    .word   .LstrNewline
strSqueak:
    .word   .LstrSqueak
strPrintHex:
    .word   .LstrPrintHex
strPrintLong:
    .word   .LstrPrintLong

/*
 * Zero-terminated ASCII string data.
 *
 * On ARM we have two choices: do like gcc does, and LDR from a .word
 * with the address, or use an ADR pseudo-op to get the address
 * directly.  ADR saves 4 bytes and an indirection, but it's using a
 * PC-relative addressing mode and hence has a limited range, which
 * makes it not work well with mergeable string sections.
 */
    .section .rodata.str1.4,"aMS",%progbits,1

.LstrBadEntryPoint:
    .asciz  "Bad entry point %d\n"
.LstrArithmeticException:
    .asciz  "Ljava/lang/ArithmeticException;"
.LstrArrayIndexException:
    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
.LstrArrayStoreException:
    .asciz  "Ljava/lang/ArrayStoreException;"
.LstrClassCastException:
    .asciz  "Ljava/lang/ClassCastException;"
.LstrDivideByZero:
    .asciz  "divide by zero"
.LstrFilledNewArrayNotImpl:
    .asciz  "filled-new-array only implemented for objects and 'int'"
.LstrInternalError:
    .asciz  "Ljava/lang/InternalError;"
.LstrInstantiationError:
    .asciz  "Ljava/lang/InstantiationError;"
.LstrNegativeArraySizeException:
    .asciz  "Ljava/lang/NegativeArraySizeException;"
.LstrNoSuchMethodError:
    .asciz  "Ljava/lang/NoSuchMethodError;"
.LstrNullPointerException:
    .asciz  "Ljava/lang/NullPointerException;"

.LstrLogTag:
    .asciz  "mterp"
.LstrExceptionNotCaughtLocally:
    .asciz  "Exception %s from %s:%d not caught locally\n"

.LstrNewline:
    .asciz  "\n"
.LstrSqueak:
    .asciz  "<%d>"
.LstrPrintHex:
    .asciz  "<0x%x>"
.LstrPrintLong:
    .asciz  "<%lld>"