/* InterpAsm-armv5te.S -- revision d5adae17d71e86a1a5f3ae7825054e3249fb7879 */
/*
 * This file was generated automatically by gen-mterp.py for 'armv5te'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-a3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
/* pc/fp are the first two fields of MterpGlue, so a single ldm/stm works */
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 */
#define FETCH_ADVANCE_INST(_count) ldrh  rINST, [rPC, #(_count*2)]!

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
    ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh  rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]

/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 *
 * NOTE: "lsl #6" means each handler occupies a 64-byte slot, which matches
 * the ".balign 64" before every .L_OP_* label below.
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

#if defined(WITH_JIT)
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
        add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"

#if defined(WITH_JIT)
#include "../common/jit-config.h"
#endif

/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm


/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.  From this it figures out how to restore
 * the registers, including PC, for the previous stack frame.  If gdb
 * sees a non-function label, it stops scanning, so either we need to
 * have nothing but assembler-local labels between the entry point and
 * the break, or we need to fake it out.
 *
 * When this is defined, we add some stuff to make gdb less confused.
 */
#define ASSIST_DEBUGGER 1

    .text
    .align  2
    .global dvmMterpStdRun
    .type   dvmMterpStdRun, %function

/*
 * On entry:
 *  r0  MterpGlue* glue
 *
 * This function returns a boolean "changeInterp" value.  The return comes
 * via a call to dvmMterpStdBail().
 */
dvmMterpStdRun:
#define MTERP_ENTRY1 \
    .save {r4-r10,fp,lr}; \
    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
#define MTERP_ENTRY2 \
    .pad    #4; \
    sub     sp, sp, #4                  @ align 64

    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2

    /* save stack pointer, add magic word for debuggerd */
    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return

    /* set up "named" registers, figure out entry point */
    mov     rGLUE, r0                   @ set rGLUE
    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
    cmp     r1, #kInterpEntryInstr      @ usual case?
    bne     .Lnot_instr                 @ no, handle it

#if defined(WITH_JIT)
.LentryInstr:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    /* Entry is always a possible trace start */
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    mov     r1, #0                      @ prepare the value for the new state
    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile        @ yes, count this location
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    /* start executing the instruction at rPC */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.Lnot_instr:
    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
    beq     common_returnFromMethod

.Lnot_return:
    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
    beq     common_exceptionThrown

#if defined(WITH_JIT)
.Lnot_throw:
    ldr     r10,[rGLUE, #offGlue_jitResumeNPC]  @ r10<- native resume address
    ldr     r2,[rGLUE, #offGlue_jitResumeDPC]   @ r2<- Dalvik resume address
    cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
    bne     .Lbad_arg
    cmp     rPC,r2
    bne     .LentryInstr                @ must have branched, don't resume
#if defined(WITH_SELF_VERIFICATION)
    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
    b       jitSVShadowRunStart         @ re-enter the translation after the
                                        @ single-stepped instruction
                                        @noreturn
#endif
    mov     r1, #kInterpEntryInstr
    str     r1, [rGLUE, #offGlue_entryPoint]
    bx      r10                         @ re-enter the translation
#endif

.Lbad_arg:
    ldr     r0, strBadEntryPoint        @ r0<- format string
    @ r1 holds value of entryPoint
    bl      printf
    bl      dvmAbort
    .fnend


    .global dvmMterpStdBail
    .type   dvmMterpStdBail, %function

/*
 * Restore the stack pointer and PC from the save point established on entry.
 * This is essentially the same as a longjmp, but should be cheaper.  The
 * last instruction causes us to return to whoever called dvmMterpStdRun.
 *
 * We pushed some registers on the stack in dvmMterpStdRun, then saved
 * SP and LR.  Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  r0  MterpGlue* glue
 *  r1  bool changeInterp
 */
dvmMterpStdBail:
    ldr     sp, [r0, #offGlue_bailPtr]  @ sp<- saved SP
    mov     r0, r1                      @ return the changeInterp value
    add     sp, sp, #4                  @ un-align 64
    LDMFD_PC "r4-r10,fp"                @ restore 9 regs and return


/*
 * String references.
 */
strBadEntryPoint:
    .word   .LstrBadEntryPoint


    .global dvmAsmInstructionStart
    .type   dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
    .text

/* ------------------------------ */
    .balign 64
.L_OP_NOP: /* 0x00 */
/* File: armv5te/OP_NOP.S */
    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    .type   dalvik_inst, %function
dalvik_inst:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
    .fnend
#endif


/* ------------------------------ */
    .balign 64
.L_OP_MOVE: /* 0x01 */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15                 @ r0<- A (mask off high nibble)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_FROM16: /* 0x02 */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_16: /* 0x03 */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE: /* 0x04 */
/* File: armv5te/OP_MOVE_WIDE.S */
    /* move-wide vA, vB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r2, r2, #15                 @ r2<- A (mask off high nibble)
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
    /* move-wide/from16 vAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 1)                        @ r3<- BBBB
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_16: /* 0x06 */
/* File: armv5te/OP_MOVE_WIDE_16.S */
    /* move-wide/16 vAAAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 2)                        @ r3<- BBBB
    FETCH(r2, 1)                        @ r2<- AAAA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AAAA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT: /* 0x07 */
/* File: armv5te/OP_MOVE_OBJECT.S */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15                 @ r0<- A (mask off high nibble)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction



/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_16: /* 0x09 */
/* File: armv5te/OP_MOVE_OBJECT_16.S */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT: /* 0x0a */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
    /* move-result-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_MOVE_EXCEPTION: /* 0x0d */
/* File: armv5te/OP_MOVE_EXCEPTION.S */
    /* move-exception vAA */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    mov     r2, rINST, lsr #8           @ r2<- AA
    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
    mov     r1, #0                      @ r1<- 0
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_RETURN_VOID: /* 0x0e */
/* File: armv5te/OP_RETURN_VOID.S */
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_RETURN: /* 0x0f */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_RETURN_WIDE: /* 0x10 */
/* File: armv5te/OP_RETURN_WIDE.S */
    /*
     * Return a 64-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     */
    /* return-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
    stmia   r3, {r0-r1}                 @ retval<- r0/r1
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_RETURN_OBJECT: /* 0x11 */
/* File: armv5te/OP_RETURN_OBJECT.S */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod



/* ------------------------------ */
    .balign 64
.L_OP_CONST_4: /* 0x12 */
/* File: armv5te/OP_CONST_4.S */
    /* const/4 vA, #+B */
    mov     r1, rINST, lsl #16          @ r1<- Bxxx0000
    mov     r0, rINST, lsr #8           @ r0<- A+
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r1, asr #28             @ r1<- sssssssB (sign-extended)
    and     r0, r0, #15                 @ r0<- A (mask off high nibble)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r1, r0)                    @ fp[A]<- r1
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_16: /* 0x13 */
/* File: armv5te/OP_CONST_16.S */
    /* const/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST: /* 0x14 */
/* File: armv5te/OP_CONST.S */
    /* const vAA, #+BBBBbbbb */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_HIGH16: /* 0x15 */
/* File: armv5te/OP_CONST_HIGH16.S */
    /* const/high16 vAA, #+BBBB0000 */
    FETCH(r0, 1)                        @ r0<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, r0, lsl #16             @ r0<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_16: /* 0x16 */
/* File: armv5te/OP_CONST_WIDE_16.S */
    /* const-wide/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r1, r0, asr #31             @ r1<- ssssssss (sign word)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_32: /* 0x17 */
/* File: armv5te/OP_CONST_WIDE_32.S */
    /* const-wide/32 vAA, #+BBBBbbbb */
    FETCH(r0, 1)                        @ r0<- 0000bbbb (low)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_S(r2, 2)                      @ r2<- ssssBBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    mov     r1, r0, asr #31             @ r1<- ssssssss (sign word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE: /* 0x18 */
/* File: armv5te/OP_CONST_WIDE.S */
    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (low middle)
    FETCH(r2, 3)                        @ r2<- hhhh (high middle)
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
    FETCH(r3, 4)                        @ r3<- HHHH (high)
    mov     r9, rINST, lsr #8           @ r9<- AA
    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
    /* const-wide/high16 vAA, #+BBBB000000000000 */
    FETCH(r1, 1)                        @ r1<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, #0                      @ r0<- 00000000
    mov     r1, r1, lsl #16             @ r1<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING: /* 0x1a */
/* File: armv5te/OP_CONST_STRING.S */
    /* const/string vAA, String@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_resolve   @ not resolved, resolve it now
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING_JUMBO: /* 0x1b */
/* File: armv5te/OP_CONST_STRING_JUMBO.S */
    /* const/string vAA, String@BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_JUMBO_resolve
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_CLASS: /* 0x1c */
/* File: armv5te/OP_CONST_CLASS.S */
    /* const/class vAA, Class@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- dvmDex->pResClasses
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_CLASS_resolve    @ not resolved, resolve it now
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_ENTER: /* 0x1d */
/* File: armv5te/OP_MONITOR_ENTER.S */
    /*
     * Synchronize on an object.
     */
    /* monitor-enter vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
    beq     common_errNullObject        @ null object, throw an exception
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      dvmLockObject               @ call(self, obj)
#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r1, [r0, #offThread_exception]  @ check for exception
    cmp     r1, #0
    bne     common_exceptionThrown      @ exception raised, bail out
#endif
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_EXIT: /* 0x1e */
/* File: armv5te/OP_MONITOR_EXIT.S */
    /*
     * Unlock an object.
     *
     * Exceptions that occur when unlocking a monitor need to appear as
     * if they happened at the following instruction.  See the Dalvik
     * instruction spec.
     */
    /* monitor-exit vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    EXPORT_PC()                         @ before fetch: export the PC
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    cmp     r1, #0                      @ null object?
    beq     1f                          @ yes
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
    cmp     r0, #0                      @ failed?
    FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
    beq     common_exceptionThrown      @ yes, exception is pending
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
1:
    FETCH_ADVANCE_INST(1)               @ advance before throw
    b       common_errNullObject


/* ------------------------------ */
    .balign 64
.L_OP_CHECK_CAST: /* 0x1f */
/* File: armv5te/OP_CHECK_CAST.S */
    /*
     * Check to see if a cast from one class to another is allowed.
     */
    /* check-cast vAA, class@BBBB */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r2, 1)                        @ r2<- BBBB
    GET_VREG(r9, r3)                    @ r9<- object
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
    cmp     r9, #0                      @ is object null?
    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
    beq     .LOP_CHECK_CAST_okay        @ null obj, cast always succeeds
    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_CHECK_CAST_resolve     @ not resolved, do it now
.LOP_CHECK_CAST_resolved:
    cmp     r0, r1                      @ same class (trivial success)?
    bne     .LOP_CHECK_CAST_fullcheck   @ no, do full check
.LOP_CHECK_CAST_okay:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INSTANCE_OF: /* 0x20 */
/* File: armv5te/OP_INSTANCE_OF.S */
    /*
     * Check to see if an object reference is an instance of a class.
     *
     * Most common situation is a non-null object, being compared against
     * an already-resolved class.
     */
    /* instance-of vA, vB, class@CCCC */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    and     r9, r9, #15                 @ r9<- A
    cmp     r0, #0                      @ is object null?
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
    beq     .LOP_INSTANCE_OF_store      @ null obj, not an instance, store r0
    FETCH(r3, 1)                        @ r3<- CCCC
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_INSTANCE_OF_resolve    @ not resolved, do it now
.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
    cmp     r0, r1                      @ same class (trivial success)?
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

/* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv5te/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    /* array-length vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B
    mov     r2, rINST, lsr #8           @ r2<- A+
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    and     r2, r2, #15                 @ r2<- A
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length] @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vA<- length (r2 holds A, not B)
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved: @ r0=class
    ldrb    r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

/* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


/* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /* fill-array-data vAA, +BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
    EXPORT_PC();
    bl      dvmInterpHandleFillArrayData @ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception] @ thread->exception<- obj
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    mov     r9, r9, lsl #1              @ r9<- byte offset
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: armv5te/OP_CMPG_FLOAT.S */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.
     * We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: armv5te/OP_CMPG_DOUBLE.S */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

/* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv5te/OP_IF_EQ.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv5te/OP_IF_NE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: armv5te/OP_IF_LT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: armv5te/OP_IF_GE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: armv5te/OP_IF_GT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LE: /* 0x37 */
/* File: armv5te/OP_IF_LE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bgt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_EQZ: /* 0x38 */
/* File: armv5te/OP_IF_EQZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_NEZ: /* 0x39 */
/* File: armv5te/OP_IF_NEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LTZ: /* 0x3a */
/* File: armv5te/OP_IF_LTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GEZ: /* 0x3b */
/* File: armv5te/OP_IF_GEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GTZ: /* 0x3c */
/* File: armv5te/OP_IF_GTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LEZ: /* 0x3d */
/* File: armv5te/OP_IF_LEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
1949 * 1950 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1951 */ 1952 /* if-cmp vAA, +BBBB */ 1953 mov r0, rINST, lsr #8 @ r0<- AA 1954 GET_VREG(r2, r0) @ r2<- vAA 1955 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1956 cmp r2, #0 @ compare (vA, 0) 1957 bgt 1f @ branch to 1 if comparison failed 1958 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1959 movs r9, r9, asl #1 @ convert to bytes, check sign 1960 bmi common_backwardBranch @ backward branch, do periodic checks 19611: 1962#if defined(WITH_JIT) 1963 GET_JIT_PROF_TABLE(r0) 1964 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1965 cmp r0,#0 1966 bne common_updateProfile 1967 GET_INST_OPCODE(ip) @ extract opcode from rINST 1968 GOTO_OPCODE(ip) @ jump to next instruction 1969#else 1970 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1971 GET_INST_OPCODE(ip) @ extract opcode from rINST 1972 GOTO_OPCODE(ip) @ jump to next instruction 1973#endif 1974 1975 1976 1977/* ------------------------------ */ 1978 .balign 64 1979.L_OP_UNUSED_3E: /* 0x3e */ 1980/* File: armv5te/OP_UNUSED_3E.S */ 1981/* File: armv5te/unused.S */ 1982 bl common_abort 1983 1984 1985 1986/* ------------------------------ */ 1987 .balign 64 1988.L_OP_UNUSED_3F: /* 0x3f */ 1989/* File: armv5te/OP_UNUSED_3F.S */ 1990/* File: armv5te/unused.S */ 1991 bl common_abort 1992 1993 1994 1995/* ------------------------------ */ 1996 .balign 64 1997.L_OP_UNUSED_40: /* 0x40 */ 1998/* File: armv5te/OP_UNUSED_40.S */ 1999/* File: armv5te/unused.S */ 2000 bl common_abort 2001 2002 2003 2004/* ------------------------------ */ 2005 .balign 64 2006.L_OP_UNUSED_41: /* 0x41 */ 2007/* File: armv5te/OP_UNUSED_41.S */ 2008/* File: armv5te/unused.S */ 2009 bl common_abort 2010 2011 2012 2013/* ------------------------------ */ 2014 .balign 64 2015.L_OP_UNUSED_42: /* 0x42 */ 2016/* File: armv5te/OP_UNUSED_42.S */ 2017/* File: armv5te/unused.S */ 2018 bl common_abort 2019 2020 2021 2022/* ------------------------------ */ 2023 .balign 64 
.L_OP_UNUSED_43: /* 0x43 */
/* File: armv5te/OP_UNUSED_43.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_AGET: /* 0x44 */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: armv5te/OP_AGET_WIDE.S */
    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .LOP_AGET_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_AGET_OBJECT: /* 0x46 */
/* File: armv5te/OP_AGET_OBJECT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BOOLEAN: /* 0x47 */
/* File: armv5te/OP_AGET_BOOLEAN.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrb    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BYTE: /* 0x48 */
/* File: armv5te/OP_AGET_BYTE.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_CHAR: /* 0x49 */
/* File: armv5te/OP_AGET_CHAR.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrh    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_SHORT: /* 0x4a */
/* File: armv5te/OP_AGET_SHORT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT: /* 0x4b */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_APUT_WIDE: /* 0x4c */
/* File: armv5te/OP_APUT_WIDE.S */
    /*
     * Array put, 64 bits.  vBB[vCC] <- vAA.
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
     */
    /* aput-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    bcc     .LOP_APUT_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_APUT_OBJECT: /* 0x4d */
/* File: armv5te/OP_APUT_OBJECT.S */
    /*
     * Store an object into an array.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r1, r2)                    @ r1<- vBB (array object)
    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
    cmp     r1, #0                      @ null array object?
    GET_VREG(r9, r9)                    @ r9<- vAA
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r1, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r10, r1, r0, lsl #2         @ r10<- arrayObj + index*width
    cmp     r0, r3                      @ compare unsigned index, length
    bcc     .LOP_APUT_OBJECT_finish     @ we're okay, continue on
    b       common_errArrayIndex        @ index >= length, bail


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BOOLEAN: /* 0x4e */
/* File: armv5te/OP_APUT_BOOLEAN.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BYTE: /* 0x4f */
/* File: armv5te/OP_APUT_BYTE.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_CHAR: /* 0x50 */
/* File: armv5te/OP_APUT_CHAR.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_SHORT: /* 0x51 */
/* File: armv5te/OP_APUT_SHORT.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET: /* 0x52 */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_finish            @ no, already resolved
    @ NOTE(review): the "8:" local label below appears unreferenced within
    @ this handler — presumably a generator artifact; confirm before removing.
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: armv5te/OP_IGET_WIDE.S */
    /*
     * Wide 64-bit instance field get.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_WIDE_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: armv5te/OP_IGET_OBJECT.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_OBJECT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: armv5te/OP_IGET_BOOLEAN.S */
@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BOOLEAN_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BYTE: /* 0x56 */
/* File: armv5te/OP_IGET_BYTE.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BYTE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_CHAR: /* 0x57 */
/* File: armv5te/OP_IGET_CHAR.S */
@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_CHAR_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_SHORT: /* 0x58 */
/* File: armv5te/OP_IGET_SHORT.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_SHORT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT: /* 0x59 */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_finish            @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE: /* 0x5a */
/* File: armv5te/OP_IPUT_WIDE.S */
    /* iput-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_WIDE_finish       @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT: /* 0x5b */
/* File: armv5te/OP_IPUT_OBJECT.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_OBJECT_finish     @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BOOLEAN: /* 0x5c */
/* File: armv5te/OP_IPUT_BOOLEAN.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BOOLEAN_finish    @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BYTE: /* 0x5d */
/* File: armv5te/OP_IPUT_BYTE.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BYTE_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_CHAR: /* 0x5e */
/* File: armv5te/OP_IPUT_CHAR.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_CHAR_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_SHORT: /* 0x5f */
/* File: armv5te/OP_IPUT_SHORT.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_SHORT_finish          @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET: /* 0x60 */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_resolve         @ yes, do resolve
    @ Fast path falls through here when the field was already resolved.
.LOP_SGET_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_WIDE: /* 0x61 */
/* File: armv5te/OP_SGET_WIDE.S */
    /*
     * 64-bit SGET handler.
     */
    /* sget-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_WIDE_resolve         @ yes, do resolve
.LOP_SGET_WIDE_finish:
    mov     r1, rINST, lsr #8           @ r1<- AA
    @ ldrd needs a doubleword-capable address; per the comment the field
    @ value slot is kept aligned for it.
    ldrd    r2, [r0, #offStaticField_value] @ r2/r3<- field value (aligned)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    stmia   r1, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_OBJECT: /* 0x62 */
/* File: armv5te/OP_SGET_OBJECT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_OBJECT_resolve         @ yes, do resolve
.LOP_SGET_OBJECT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_BOOLEAN: /* 0x63 */
/* File: armv5te/OP_SGET_BOOLEAN.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BOOLEAN_resolve         @ yes, do resolve
.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_BYTE: /* 0x64 */
/* File: armv5te/OP_SGET_BYTE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BYTE_resolve         @ yes, do resolve
.LOP_SGET_BYTE_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_CHAR: /* 0x65 */
/* File: armv5te/OP_SGET_CHAR.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_CHAR_resolve         @ yes, do resolve
.LOP_SGET_CHAR_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_SHORT: /* 0x66 */
/* File: armv5te/OP_SGET_SHORT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_SHORT_resolve         @ yes, do resolve
.LOP_SGET_SHORT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT: /* 0x67 */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_resolve         @ yes, do resolve
    @ All 32-bit sput variants store a full word with "str" (presumably
    @ static field value slots are 32 bits wide -- confirm in StaticField).
.LOP_SPUT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_WIDE: /* 0x68 */
/* File: armv5te/OP_SPUT_WIDE.S */
    /*
     * 64-bit SPUT handler.
     */
    /* sput-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_WIDE_resolve         @ yes, do resolve
.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strd    r2, [r0, #offStaticField_value] @ field<- vAA/vAA+1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_OBJECT: /* 0x69 */
/* File: armv5te/OP_SPUT_OBJECT.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_OBJECT_resolve         @ yes, do resolve
.LOP_SPUT_OBJECT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BOOLEAN: /* 0x6a */
/* File: armv5te/OP_SPUT_BOOLEAN.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BOOLEAN_resolve         @ yes, do resolve
.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BYTE: /* 0x6b */
/* File: armv5te/OP_SPUT_BYTE.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BYTE_resolve         @ yes, do resolve
.LOP_SPUT_BYTE_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_CHAR: /* 0x6c */
/* File: armv5te/OP_SPUT_CHAR.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_CHAR_resolve         @ yes, do resolve
.LOP_SPUT_CHAR_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_SHORT: /* 0x6d */
/* File: armv5te/OP_SPUT_SHORT.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_SHORT_resolve         @ yes, do resolve
.LOP_SPUT_SHORT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL: /* 0x6e */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    @ ".if (!0)" is the generator's expanded isrange=0 parameter: the AND
    @ that extracts D from GFED is assembled in for the non-range form.
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_continue        @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_continue        @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER: /* 0x6f */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_continue        @ resolved, continue on
    b       .LOP_INVOKE_SUPER_resolve         @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT: /* 0x70 */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoiding loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_resolve         @ not resolved, do it now
.LOP_INVOKE_DIRECT_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodNoRange  @ no, continue on
    b       common_errNullObject        @ yes, throw exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC: /* 0x71 */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodNoRange @ yes, continue on
    @ Slow path; "0" below is a GAS numeric local label.
0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodNoRange @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE: /* 0x72 */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!0)
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodNoRange @ jump to common handler


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_73: /* 0x73 */
/* File: armv5te/OP_UNUSED_73.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    @ ".if (!1)" is the expanded isrange=1 parameter: the D-extraction AND
    @ is assembled OUT for the /range form (r10 keeps the full CCCC).
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue        @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue        @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_RANGE_continue        @ resolved, continue on
    b       .LOP_INVOKE_SUPER_RANGE_resolve         @ do resolve now


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoiding loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_RANGE_resolve         @ not resolved, do it now
.LOP_INVOKE_DIRECT_RANGE_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodRange  @ no, continue on
    b       common_errNullObject        @ yes, throw exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodRange @ yes, continue on
0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodRange @ no, continue
    b       common_exceptionThrown      @ yes, handle exception



/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!1)
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodRange @ jump to common handler



/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_79: /* 0x79 */
/* File: armv5te/OP_UNUSED_79.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_7A: /* 0x7a */
/* File: armv5te/OP_UNUSED_7A.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_NEG_INT: /* 0x7b */
/* File: armv5te/OP_NEG_INT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsb     r0, r0, #0                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NOT_INT: /* 0x7c */
/* File: armv5te/OP_NOT_INT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_LONG: /* 0x7d */
/* File: armv5te/OP_NEG_LONG.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ 64-bit negate: rsbs sets the carry from the low word, rsc consumes it.
    rsbs    r0, r0, #0                           @ optional op; may set condition codes
    rsc     r1, r1, #0                              @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_NOT_LONG: /* 0x7e */
/* File: armv5te/OP_NOT_LONG.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                           @ optional op; may set condition codes
    mvn     r1, r1                              @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_NEG_FLOAT: /* 0x7f */
/* File: armv5te/OP_NEG_FLOAT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ float negate is done in integer regs by flipping the IEEE sign bit
    add     r0, r0, #0x80000000                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_DOUBLE: /* 0x80 */
/* File: armv5te/OP_NEG_DOUBLE.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    @ double negate: flip the sign bit in the high word (r1); r0 unchanged
    add     r1, r1, #0x80000000                              @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_LONG: /* 0x81 */
/* File: armv5te/OP_INT_TO_LONG.S */
/* File: armv5te/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ sign-extend: high word is the low word's sign replicated
    mov     r1, r0, asr #31                              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_FLOAT: /* 0x82 */
/* File: armv5te/OP_INT_TO_FLOAT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
3747 * This could be an ARM instruction or a function call. 3748 * 3749 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3750 * int-to-byte, int-to-char, int-to-short 3751 */ 3752 /* unop vA, vB */ 3753 mov r3, rINST, lsr #12 @ r3<- B 3754 mov r9, rINST, lsr #8 @ r9<- A+ 3755 GET_VREG(r0, r3) @ r0<- vB 3756 and r9, r9, #15 3757 @ optional op; may set condition codes 3758 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3759 bl __aeabi_i2f @ r0<- op, r0-r3 changed 3760 GET_INST_OPCODE(ip) @ extract opcode from rINST 3761 SET_VREG(r0, r9) @ vAA<- r0 3762 GOTO_OPCODE(ip) @ jump to next instruction 3763 /* 9-10 instructions */ 3764 3765 3766/* ------------------------------ */ 3767 .balign 64 3768.L_OP_INT_TO_DOUBLE: /* 0x83 */ 3769/* File: armv5te/OP_INT_TO_DOUBLE.S */ 3770/* File: armv5te/unopWider.S */ 3771 /* 3772 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3773 * that specifies an instruction that performs "result = op r0", where 3774 * "result" is a 64-bit quantity in r0/r1. 
3775 * 3776 * For: int-to-long, int-to-double, float-to-long, float-to-double 3777 */ 3778 /* unop vA, vB */ 3779 mov r9, rINST, lsr #8 @ r9<- A+ 3780 mov r3, rINST, lsr #12 @ r3<- B 3781 and r9, r9, #15 3782 GET_VREG(r0, r3) @ r0<- vB 3783 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3784 @ optional op; may set condition codes 3785 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3786 bl __aeabi_i2d @ r0<- op, r0-r3 changed 3787 GET_INST_OPCODE(ip) @ extract opcode from rINST 3788 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3789 GOTO_OPCODE(ip) @ jump to next instruction 3790 /* 10-11 instructions */ 3791 3792 3793/* ------------------------------ */ 3794 .balign 64 3795.L_OP_LONG_TO_INT: /* 0x84 */ 3796/* File: armv5te/OP_LONG_TO_INT.S */ 3797/* we ignore the high word, making this equivalent to a 32-bit reg move */ 3798/* File: armv5te/OP_MOVE.S */ 3799 /* for move, move-object, long-to-int */ 3800 /* op vA, vB */ 3801 mov r1, rINST, lsr #12 @ r1<- B from 15:12 3802 mov r0, rINST, lsr #8 @ r0<- A from 11:8 3803 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3804 GET_VREG(r2, r1) @ r2<- fp[B] 3805 and r0, r0, #15 3806 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 3807 SET_VREG(r2, r0) @ fp[A]<- r2 3808 GOTO_OPCODE(ip) @ execute next instruction 3809 3810 3811 3812/* ------------------------------ */ 3813 .balign 64 3814.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3815/* File: armv5te/OP_LONG_TO_FLOAT.S */ 3816/* File: armv5te/unopNarrower.S */ 3817 /* 3818 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3819 * that specifies an instruction that performs "result = op r0/r1", where 3820 * "result" is a 32-bit quantity in r0. 3821 * 3822 * For: long-to-float, double-to-int, double-to-float 3823 * 3824 * (This would work for long-to-int, but that instruction is actually 3825 * an exact match for OP_MOVE.) 
3826 */ 3827 /* unop vA, vB */ 3828 mov r3, rINST, lsr #12 @ r3<- B 3829 mov r9, rINST, lsr #8 @ r9<- A+ 3830 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3831 and r9, r9, #15 3832 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3833 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3834 @ optional op; may set condition codes 3835 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3836 GET_INST_OPCODE(ip) @ extract opcode from rINST 3837 SET_VREG(r0, r9) @ vA<- r0 3838 GOTO_OPCODE(ip) @ jump to next instruction 3839 /* 10-11 instructions */ 3840 3841 3842/* ------------------------------ */ 3843 .balign 64 3844.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3845/* File: armv5te/OP_LONG_TO_DOUBLE.S */ 3846/* File: armv5te/unopWide.S */ 3847 /* 3848 * Generic 64-bit unary operation. Provide an "instr" line that 3849 * specifies an instruction that performs "result = op r0/r1". 3850 * This could be an ARM instruction or a function call. 3851 * 3852 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3853 */ 3854 /* unop vA, vB */ 3855 mov r9, rINST, lsr #8 @ r9<- A+ 3856 mov r3, rINST, lsr #12 @ r3<- B 3857 and r9, r9, #15 3858 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3859 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3860 ldmia r3, {r0-r1} @ r0/r1<- vAA 3861 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3862 @ optional op; may set condition codes 3863 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3864 GET_INST_OPCODE(ip) @ extract opcode from rINST 3865 stmia r9, {r0-r1} @ vAA<- r0/r1 3866 GOTO_OPCODE(ip) @ jump to next instruction 3867 /* 12-13 instructions */ 3868 3869 3870 3871/* ------------------------------ */ 3872 .balign 64 3873.L_OP_FLOAT_TO_INT: /* 0x87 */ 3874/* File: armv5te/OP_FLOAT_TO_INT.S */ 3875/* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3876/* File: armv5te/unop.S */ 3877 /* 3878 * Generic 32-bit unary operation. Provide an "instr" line that 3879 * specifies an instruction that performs "result = op r0". 3880 * This could be an ARM instruction or a function call. 
3881 * 3882 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3883 * int-to-byte, int-to-char, int-to-short 3884 */ 3885 /* unop vA, vB */ 3886 mov r3, rINST, lsr #12 @ r3<- B 3887 mov r9, rINST, lsr #8 @ r9<- A+ 3888 GET_VREG(r0, r3) @ r0<- vB 3889 and r9, r9, #15 3890 @ optional op; may set condition codes 3891 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3892 bl __aeabi_f2iz @ r0<- op, r0-r3 changed 3893 GET_INST_OPCODE(ip) @ extract opcode from rINST 3894 SET_VREG(r0, r9) @ vAA<- r0 3895 GOTO_OPCODE(ip) @ jump to next instruction 3896 /* 9-10 instructions */ 3897 3898 3899#if 0 3900@include "armv5te/unop.S" {"instr":"bl f2i_doconv"} 3901@break 3902/* 3903 * Convert the float in r0 to an int in r0. 3904 * 3905 * We have to clip values to int min/max per the specification. The 3906 * expected common case is a "reasonable" value that converts directly 3907 * to modest integer. The EABI convert function isn't doing this for us. 3908 */ 3909f2i_doconv: 3910 stmfd sp!, {r4, lr} 3911 mov r1, #0x4f000000 @ (float)maxint 3912 mov r4, r0 3913 bl __aeabi_fcmpge @ is arg >= maxint? 3914 cmp r0, #0 @ nonzero == yes 3915 mvnne r0, #0x80000000 @ return maxint (7fffffff) 3916 ldmnefd sp!, {r4, pc} 3917 3918 mov r0, r4 @ recover arg 3919 mov r1, #0xcf000000 @ (float)minint 3920 bl __aeabi_fcmple @ is arg <= minint? 3921 cmp r0, #0 @ nonzero == yes 3922 movne r0, #0x80000000 @ return minint (80000000) 3923 ldmnefd sp!, {r4, pc} 3924 3925 mov r0, r4 @ recover arg 3926 mov r1, r4 3927 bl __aeabi_fcmpeq @ is arg == self? 
3928 cmp r0, #0 @ zero == no 3929 ldmeqfd sp!, {r4, pc} @ return zero for NaN 3930 3931 mov r0, r4 @ recover arg 3932 bl __aeabi_f2iz @ convert float to int 3933 ldmfd sp!, {r4, pc} 3934#endif 3935 3936 3937/* ------------------------------ */ 3938 .balign 64 3939.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3940/* File: armv5te/OP_FLOAT_TO_LONG.S */ 3941@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3942/* File: armv5te/unopWider.S */ 3943 /* 3944 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3945 * that specifies an instruction that performs "result = op r0", where 3946 * "result" is a 64-bit quantity in r0/r1. 3947 * 3948 * For: int-to-long, int-to-double, float-to-long, float-to-double 3949 */ 3950 /* unop vA, vB */ 3951 mov r9, rINST, lsr #8 @ r9<- A+ 3952 mov r3, rINST, lsr #12 @ r3<- B 3953 and r9, r9, #15 3954 GET_VREG(r0, r3) @ r0<- vB 3955 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3956 @ optional op; may set condition codes 3957 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3958 bl f2l_doconv @ r0<- op, r0-r3 changed 3959 GET_INST_OPCODE(ip) @ extract opcode from rINST 3960 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3961 GOTO_OPCODE(ip) @ jump to next instruction 3962 /* 10-11 instructions */ 3963 3964 3965 3966/* ------------------------------ */ 3967 .balign 64 3968.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3969/* File: armv5te/OP_FLOAT_TO_DOUBLE.S */ 3970/* File: armv5te/unopWider.S */ 3971 /* 3972 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3973 * that specifies an instruction that performs "result = op r0", where 3974 * "result" is a 64-bit quantity in r0/r1. 
3975 * 3976 * For: int-to-long, int-to-double, float-to-long, float-to-double 3977 */ 3978 /* unop vA, vB */ 3979 mov r9, rINST, lsr #8 @ r9<- A+ 3980 mov r3, rINST, lsr #12 @ r3<- B 3981 and r9, r9, #15 3982 GET_VREG(r0, r3) @ r0<- vB 3983 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3984 @ optional op; may set condition codes 3985 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3986 bl __aeabi_f2d @ r0<- op, r0-r3 changed 3987 GET_INST_OPCODE(ip) @ extract opcode from rINST 3988 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3989 GOTO_OPCODE(ip) @ jump to next instruction 3990 /* 10-11 instructions */ 3991 3992 3993/* ------------------------------ */ 3994 .balign 64 3995.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3996/* File: armv5te/OP_DOUBLE_TO_INT.S */ 3997/* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3998/* File: armv5te/unopNarrower.S */ 3999 /* 4000 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 4001 * that specifies an instruction that performs "result = op r0/r1", where 4002 * "result" is a 32-bit quantity in r0. 4003 * 4004 * For: long-to-float, double-to-int, double-to-float 4005 * 4006 * (This would work for long-to-int, but that instruction is actually 4007 * an exact match for OP_MOVE.) 4008 */ 4009 /* unop vA, vB */ 4010 mov r3, rINST, lsr #12 @ r3<- B 4011 mov r9, rINST, lsr #8 @ r9<- A+ 4012 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4013 and r9, r9, #15 4014 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 4015 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4016 @ optional op; may set condition codes 4017 bl __aeabi_d2iz @ r0<- op, r0-r3 changed 4018 GET_INST_OPCODE(ip) @ extract opcode from rINST 4019 SET_VREG(r0, r9) @ vA<- r0 4020 GOTO_OPCODE(ip) @ jump to next instruction 4021 /* 10-11 instructions */ 4022 4023 4024#if 0 4025@include "armv5te/unopNarrower.S" {"instr":"bl d2i_doconv"} 4026@break 4027/* 4028 * Convert the double in r0/r1 to an int in r0. 4029 * 4030 * We have to clip values to int min/max per the specification. 
The 4031 * expected common case is a "reasonable" value that converts directly 4032 * to modest integer. The EABI convert function isn't doing this for us. 4033 */ 4034d2i_doconv: 4035 stmfd sp!, {r4, r5, lr} @ save regs 4036 mov r2, #0x80000000 @ maxint, as a double (low word) 4037 mov r2, r2, asr #9 @ 0xffc00000 4038 sub sp, sp, #4 @ align for EABI 4039 mvn r3, #0xbe000000 @ maxint, as a double (high word) 4040 sub r3, r3, #0x00200000 @ 0x41dfffff 4041 mov r4, r0 @ save a copy of r0 4042 mov r5, r1 @ and r1 4043 bl __aeabi_dcmpge @ is arg >= maxint? 4044 cmp r0, #0 @ nonzero == yes 4045 mvnne r0, #0x80000000 @ return maxint (0x7fffffff) 4046 bne 1f 4047 4048 mov r0, r4 @ recover arg 4049 mov r1, r5 4050 mov r3, #0xc1000000 @ minint, as a double (high word) 4051 add r3, r3, #0x00e00000 @ 0xc1e00000 4052 mov r2, #0 @ minint, as a double (low word) 4053 bl __aeabi_dcmple @ is arg <= minint? 4054 cmp r0, #0 @ nonzero == yes 4055 movne r0, #0x80000000 @ return minint (80000000) 4056 bne 1f 4057 4058 mov r0, r4 @ recover arg 4059 mov r1, r5 4060 mov r2, r4 @ compare against self 4061 mov r3, r5 4062 bl __aeabi_dcmpeq @ is arg == self? 4063 cmp r0, #0 @ zero == no 4064 beq 1f @ return zero for NaN 4065 4066 mov r0, r4 @ recover arg 4067 mov r1, r5 4068 bl __aeabi_d2iz @ convert double to int 4069 40701: 4071 add sp, sp, #4 4072 ldmfd sp!, {r4, r5, pc} 4073#endif 4074 4075 4076/* ------------------------------ */ 4077 .balign 64 4078.L_OP_DOUBLE_TO_LONG: /* 0x8b */ 4079/* File: armv5te/OP_DOUBLE_TO_LONG.S */ 4080@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"} 4081/* File: armv5te/unopWide.S */ 4082 /* 4083 * Generic 64-bit unary operation. Provide an "instr" line that 4084 * specifies an instruction that performs "result = op r0/r1". 4085 * This could be an ARM instruction or a function call. 
4086 * 4087 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 4088 */ 4089 /* unop vA, vB */ 4090 mov r9, rINST, lsr #8 @ r9<- A+ 4091 mov r3, rINST, lsr #12 @ r3<- B 4092 and r9, r9, #15 4093 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4094 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 4095 ldmia r3, {r0-r1} @ r0/r1<- vAA 4096 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4097 @ optional op; may set condition codes 4098 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 4099 GET_INST_OPCODE(ip) @ extract opcode from rINST 4100 stmia r9, {r0-r1} @ vAA<- r0/r1 4101 GOTO_OPCODE(ip) @ jump to next instruction 4102 /* 12-13 instructions */ 4103 4104 4105 4106 4107/* ------------------------------ */ 4108 .balign 64 4109.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 4110/* File: armv5te/OP_DOUBLE_TO_FLOAT.S */ 4111/* File: armv5te/unopNarrower.S */ 4112 /* 4113 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 4114 * that specifies an instruction that performs "result = op r0/r1", where 4115 * "result" is a 32-bit quantity in r0. 4116 * 4117 * For: long-to-float, double-to-int, double-to-float 4118 * 4119 * (This would work for long-to-int, but that instruction is actually 4120 * an exact match for OP_MOVE.) 4121 */ 4122 /* unop vA, vB */ 4123 mov r3, rINST, lsr #12 @ r3<- B 4124 mov r9, rINST, lsr #8 @ r9<- A+ 4125 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4126 and r9, r9, #15 4127 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 4128 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4129 @ optional op; may set condition codes 4130 bl __aeabi_d2f @ r0<- op, r0-r3 changed 4131 GET_INST_OPCODE(ip) @ extract opcode from rINST 4132 SET_VREG(r0, r9) @ vA<- r0 4133 GOTO_OPCODE(ip) @ jump to next instruction 4134 /* 10-11 instructions */ 4135 4136 4137/* ------------------------------ */ 4138 .balign 64 4139.L_OP_INT_TO_BYTE: /* 0x8d */ 4140/* File: armv5te/OP_INT_TO_BYTE.S */ 4141/* File: armv5te/unop.S */ 4142 /* 4143 * Generic 32-bit unary operation. 
Provide an "instr" line that 4144 * specifies an instruction that performs "result = op r0". 4145 * This could be an ARM instruction or a function call. 4146 * 4147 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4148 * int-to-byte, int-to-char, int-to-short 4149 */ 4150 /* unop vA, vB */ 4151 mov r3, rINST, lsr #12 @ r3<- B 4152 mov r9, rINST, lsr #8 @ r9<- A+ 4153 GET_VREG(r0, r3) @ r0<- vB 4154 and r9, r9, #15 4155 mov r0, r0, asl #24 @ optional op; may set condition codes 4156 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4157 mov r0, r0, asr #24 @ r0<- op, r0-r3 changed 4158 GET_INST_OPCODE(ip) @ extract opcode from rINST 4159 SET_VREG(r0, r9) @ vAA<- r0 4160 GOTO_OPCODE(ip) @ jump to next instruction 4161 /* 9-10 instructions */ 4162 4163 4164/* ------------------------------ */ 4165 .balign 64 4166.L_OP_INT_TO_CHAR: /* 0x8e */ 4167/* File: armv5te/OP_INT_TO_CHAR.S */ 4168/* File: armv5te/unop.S */ 4169 /* 4170 * Generic 32-bit unary operation. Provide an "instr" line that 4171 * specifies an instruction that performs "result = op r0". 4172 * This could be an ARM instruction or a function call. 4173 * 4174 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4175 * int-to-byte, int-to-char, int-to-short 4176 */ 4177 /* unop vA, vB */ 4178 mov r3, rINST, lsr #12 @ r3<- B 4179 mov r9, rINST, lsr #8 @ r9<- A+ 4180 GET_VREG(r0, r3) @ r0<- vB 4181 and r9, r9, #15 4182 mov r0, r0, asl #16 @ optional op; may set condition codes 4183 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4184 mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed 4185 GET_INST_OPCODE(ip) @ extract opcode from rINST 4186 SET_VREG(r0, r9) @ vAA<- r0 4187 GOTO_OPCODE(ip) @ jump to next instruction 4188 /* 9-10 instructions */ 4189 4190 4191/* ------------------------------ */ 4192 .balign 64 4193.L_OP_INT_TO_SHORT: /* 0x8f */ 4194/* File: armv5te/OP_INT_TO_SHORT.S */ 4195/* File: armv5te/unop.S */ 4196 /* 4197 * Generic 32-bit unary operation. 
Provide an "instr" line that 4198 * specifies an instruction that performs "result = op r0". 4199 * This could be an ARM instruction or a function call. 4200 * 4201 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4202 * int-to-byte, int-to-char, int-to-short 4203 */ 4204 /* unop vA, vB */ 4205 mov r3, rINST, lsr #12 @ r3<- B 4206 mov r9, rINST, lsr #8 @ r9<- A+ 4207 GET_VREG(r0, r3) @ r0<- vB 4208 and r9, r9, #15 4209 mov r0, r0, asl #16 @ optional op; may set condition codes 4210 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4211 mov r0, r0, asr #16 @ r0<- op, r0-r3 changed 4212 GET_INST_OPCODE(ip) @ extract opcode from rINST 4213 SET_VREG(r0, r9) @ vAA<- r0 4214 GOTO_OPCODE(ip) @ jump to next instruction 4215 /* 9-10 instructions */ 4216 4217 4218/* ------------------------------ */ 4219 .balign 64 4220.L_OP_ADD_INT: /* 0x90 */ 4221/* File: armv5te/OP_ADD_INT.S */ 4222/* File: armv5te/binop.S */ 4223 /* 4224 * Generic 32-bit binary operation. Provide an "instr" line that 4225 * specifies an instruction that performs "result = r0 op r1". 4226 * This could be an ARM instruction or a function call. (If the result 4227 * comes back in a register other than r0, you can override "result".) 4228 * 4229 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4230 * vCC (r1). Useful for integer division and modulus. Note that we 4231 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4232 * handles it correctly. 4233 * 4234 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4235 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4236 * mul-float, div-float, rem-float 4237 */ 4238 /* binop vAA, vBB, vCC */ 4239 FETCH(r0, 1) @ r0<- CCBB 4240 mov r9, rINST, lsr #8 @ r9<- AA 4241 mov r3, r0, lsr #8 @ r3<- CC 4242 and r2, r0, #255 @ r2<- BB 4243 GET_VREG(r1, r3) @ r1<- vCC 4244 GET_VREG(r0, r2) @ r0<- vBB 4245 .if 0 4246 cmp r1, #0 @ is second operand zero? 
4247 beq common_errDivideByZero 4248 .endif 4249 4250 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4251 @ optional op; may set condition codes 4252 add r0, r0, r1 @ r0<- op, r0-r3 changed 4253 GET_INST_OPCODE(ip) @ extract opcode from rINST 4254 SET_VREG(r0, r9) @ vAA<- r0 4255 GOTO_OPCODE(ip) @ jump to next instruction 4256 /* 11-14 instructions */ 4257 4258 4259 4260/* ------------------------------ */ 4261 .balign 64 4262.L_OP_SUB_INT: /* 0x91 */ 4263/* File: armv5te/OP_SUB_INT.S */ 4264/* File: armv5te/binop.S */ 4265 /* 4266 * Generic 32-bit binary operation. Provide an "instr" line that 4267 * specifies an instruction that performs "result = r0 op r1". 4268 * This could be an ARM instruction or a function call. (If the result 4269 * comes back in a register other than r0, you can override "result".) 4270 * 4271 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4272 * vCC (r1). Useful for integer division and modulus. Note that we 4273 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4274 * handles it correctly. 4275 * 4276 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4277 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4278 * mul-float, div-float, rem-float 4279 */ 4280 /* binop vAA, vBB, vCC */ 4281 FETCH(r0, 1) @ r0<- CCBB 4282 mov r9, rINST, lsr #8 @ r9<- AA 4283 mov r3, r0, lsr #8 @ r3<- CC 4284 and r2, r0, #255 @ r2<- BB 4285 GET_VREG(r1, r3) @ r1<- vCC 4286 GET_VREG(r0, r2) @ r0<- vBB 4287 .if 0 4288 cmp r1, #0 @ is second operand zero? 
4289 beq common_errDivideByZero 4290 .endif 4291 4292 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4293 @ optional op; may set condition codes 4294 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4295 GET_INST_OPCODE(ip) @ extract opcode from rINST 4296 SET_VREG(r0, r9) @ vAA<- r0 4297 GOTO_OPCODE(ip) @ jump to next instruction 4298 /* 11-14 instructions */ 4299 4300 4301 4302/* ------------------------------ */ 4303 .balign 64 4304.L_OP_MUL_INT: /* 0x92 */ 4305/* File: armv5te/OP_MUL_INT.S */ 4306/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4307/* File: armv5te/binop.S */ 4308 /* 4309 * Generic 32-bit binary operation. Provide an "instr" line that 4310 * specifies an instruction that performs "result = r0 op r1". 4311 * This could be an ARM instruction or a function call. (If the result 4312 * comes back in a register other than r0, you can override "result".) 4313 * 4314 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4315 * vCC (r1). Useful for integer division and modulus. Note that we 4316 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4317 * handles it correctly. 4318 * 4319 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4320 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4321 * mul-float, div-float, rem-float 4322 */ 4323 /* binop vAA, vBB, vCC */ 4324 FETCH(r0, 1) @ r0<- CCBB 4325 mov r9, rINST, lsr #8 @ r9<- AA 4326 mov r3, r0, lsr #8 @ r3<- CC 4327 and r2, r0, #255 @ r2<- BB 4328 GET_VREG(r1, r3) @ r1<- vCC 4329 GET_VREG(r0, r2) @ r0<- vBB 4330 .if 0 4331 cmp r1, #0 @ is second operand zero? 
4332 beq common_errDivideByZero 4333 .endif 4334 4335 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4336 @ optional op; may set condition codes 4337 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4338 GET_INST_OPCODE(ip) @ extract opcode from rINST 4339 SET_VREG(r0, r9) @ vAA<- r0 4340 GOTO_OPCODE(ip) @ jump to next instruction 4341 /* 11-14 instructions */ 4342 4343 4344 4345/* ------------------------------ */ 4346 .balign 64 4347.L_OP_DIV_INT: /* 0x93 */ 4348/* File: armv5te/OP_DIV_INT.S */ 4349/* File: armv5te/binop.S */ 4350 /* 4351 * Generic 32-bit binary operation. Provide an "instr" line that 4352 * specifies an instruction that performs "result = r0 op r1". 4353 * This could be an ARM instruction or a function call. (If the result 4354 * comes back in a register other than r0, you can override "result".) 4355 * 4356 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4357 * vCC (r1). Useful for integer division and modulus. Note that we 4358 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4359 * handles it correctly. 4360 * 4361 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4362 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4363 * mul-float, div-float, rem-float 4364 */ 4365 /* binop vAA, vBB, vCC */ 4366 FETCH(r0, 1) @ r0<- CCBB 4367 mov r9, rINST, lsr #8 @ r9<- AA 4368 mov r3, r0, lsr #8 @ r3<- CC 4369 and r2, r0, #255 @ r2<- BB 4370 GET_VREG(r1, r3) @ r1<- vCC 4371 GET_VREG(r0, r2) @ r0<- vBB 4372 .if 1 4373 cmp r1, #0 @ is second operand zero? 
4374 beq common_errDivideByZero 4375 .endif 4376 4377 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4378 @ optional op; may set condition codes 4379 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4380 GET_INST_OPCODE(ip) @ extract opcode from rINST 4381 SET_VREG(r0, r9) @ vAA<- r0 4382 GOTO_OPCODE(ip) @ jump to next instruction 4383 /* 11-14 instructions */ 4384 4385 4386 4387/* ------------------------------ */ 4388 .balign 64 4389.L_OP_REM_INT: /* 0x94 */ 4390/* File: armv5te/OP_REM_INT.S */ 4391/* idivmod returns quotient in r0 and remainder in r1 */ 4392/* File: armv5te/binop.S */ 4393 /* 4394 * Generic 32-bit binary operation. Provide an "instr" line that 4395 * specifies an instruction that performs "result = r0 op r1". 4396 * This could be an ARM instruction or a function call. (If the result 4397 * comes back in a register other than r0, you can override "result".) 4398 * 4399 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4400 * vCC (r1). Useful for integer division and modulus. Note that we 4401 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4402 * handles it correctly. 4403 * 4404 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4405 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4406 * mul-float, div-float, rem-float 4407 */ 4408 /* binop vAA, vBB, vCC */ 4409 FETCH(r0, 1) @ r0<- CCBB 4410 mov r9, rINST, lsr #8 @ r9<- AA 4411 mov r3, r0, lsr #8 @ r3<- CC 4412 and r2, r0, #255 @ r2<- BB 4413 GET_VREG(r1, r3) @ r1<- vCC 4414 GET_VREG(r0, r2) @ r0<- vBB 4415 .if 1 4416 cmp r1, #0 @ is second operand zero? 
4417 beq common_errDivideByZero 4418 .endif 4419 4420 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4421 @ optional op; may set condition codes 4422 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4423 GET_INST_OPCODE(ip) @ extract opcode from rINST 4424 SET_VREG(r1, r9) @ vAA<- r1 4425 GOTO_OPCODE(ip) @ jump to next instruction 4426 /* 11-14 instructions */ 4427 4428 4429 4430/* ------------------------------ */ 4431 .balign 64 4432.L_OP_AND_INT: /* 0x95 */ 4433/* File: armv5te/OP_AND_INT.S */ 4434/* File: armv5te/binop.S */ 4435 /* 4436 * Generic 32-bit binary operation. Provide an "instr" line that 4437 * specifies an instruction that performs "result = r0 op r1". 4438 * This could be an ARM instruction or a function call. (If the result 4439 * comes back in a register other than r0, you can override "result".) 4440 * 4441 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4442 * vCC (r1). Useful for integer division and modulus. Note that we 4443 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4444 * handles it correctly. 4445 * 4446 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4447 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4448 * mul-float, div-float, rem-float 4449 */ 4450 /* binop vAA, vBB, vCC */ 4451 FETCH(r0, 1) @ r0<- CCBB 4452 mov r9, rINST, lsr #8 @ r9<- AA 4453 mov r3, r0, lsr #8 @ r3<- CC 4454 and r2, r0, #255 @ r2<- BB 4455 GET_VREG(r1, r3) @ r1<- vCC 4456 GET_VREG(r0, r2) @ r0<- vBB 4457 .if 0 4458 cmp r1, #0 @ is second operand zero? 
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT: /* 0x96 */
/* File: armv5te/OP_OR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT: /* 0x97 */
/* File: armv5te/OP_XOR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT: /* 0x98 */
/* File: armv5te/OP_SHL_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT: /* 0x99 */
/* File: armv5te/OP_SHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT: /* 0x9a */
/* File: armv5te/OP_USHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_LONG: /* 0x9b */
/* File: armv5te/OP_ADD_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    adds    r0, r0, r2                  @ optional op; may set condition codes
    adc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG: /* 0x9c */
/* File: armv5te/OP_SUB_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ optional op; may set condition codes
    sbc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG: /* 0x9d */
/* File: armv5te/OP_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
     */
    /* mul-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST, lsr #8           @ r0<- AA
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    add     r0, rFP, r0, lsl #2         @ r0<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_MUL_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG: /* 0x9e */
/* File: armv5te/OP_DIV_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG: /* 0x9f */
/* File: armv5te/OP_REM_LONG.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vAA/vAA+1<- r2/r3 (remainder)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG: /* 0xa0 */
/* File: armv5te/OP_AND_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ optional op; may set condition codes
    and     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG: /* 0xa1 */
/* File: armv5te/OP_OR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ optional op; may set condition codes
    orr     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG: /* 0xa2 */
/* File: armv5te/OP_XOR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG: /* 0xa3 */
/* File: armv5te/OP_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG: /* 0xa4 */
/* File: armv5te/OP_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2 (low word, logical)
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHR_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG: /* 0xa5 */
/* File: armv5te/OP_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* ushr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2 (low word, logical)
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_USHR_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT: /* 0xa6 */
/* File: armv5te/OP_ADD_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      __aeabi_fadd                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT: /* 0xa7 */
/* File: armv5te/OP_SUB_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      __aeabi_fsub                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT: /* 0xa8 */
/* File: armv5te/OP_MUL_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      __aeabi_fmul                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT: /* 0xa9 */
/* File: armv5te/OP_DIV_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      __aeabi_fdiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT: /* 0xaa */
/* File: armv5te/OP_REM_FLOAT.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE: /* 0xab */
/* File: armv5te/OP_ADD_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_dadd                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE: /* 0xac */
/* File: armv5te/OP_SUB_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_dsub                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE: /* 0xad */
/* File: armv5te/OP_MUL_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_dmul                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE: /* 0xae */
/* File: armv5te/OP_DIV_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_ddiv                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE: /* 0xaf */
/* File: armv5te/OP_REM_DOUBLE.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r2-r3).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
 */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero==0: fmod handles a zero divisor itself
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed (libm fmod)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_2ADDR: /* 0xb0 */
/* File: armv5te/OP_ADD_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: no divide-by-zero test here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT_2ADDR: /* 0xb1 */
/* File: armv5te/OP_SUB_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: no divide-by-zero test here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_2ADDR: /* 0xb2 */
/* File: armv5te/OP_MUL_INT_2ADDR.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: no divide-by-zero test here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_2ADDR: /* 0xb3 */
/* File: armv5te/OP_DIV_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1                               @ chkzero==1: throw before calling helper
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_2ADDR: /* 0xb4 */
/* File: armv5te/OP_REM_INT_2ADDR.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1                               @ chkzero==1: throw before calling helper
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1 (remainder)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_2ADDR: /* 0xb5 */
/* File: armv5te/OP_AND_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: no divide-by-zero test here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_2ADDR: /* 0xb6 */
/* File: armv5te/OP_OR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: no divide-by-zero test here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_2ADDR: /* 0xb7 */
/* File: armv5te/OP_XOR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: no divide-by-zero test here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT_2ADDR: /* 0xb8 */
/* File: armv5te/OP_SHL_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: no divide-by-zero test here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_2ADDR: /* 0xb9 */
/* File: armv5te/OP_SHR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: no divide-by-zero test here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_2ADDR: /* 0xba */
/* File: armv5te/OP_USHR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero==0: no divide-by-zero test here
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_LONG_2ADDR: /* 0xbb */
/* File: armv5te/OP_ADD_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vB (r2-r3).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (mask off high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ chkzero==0: no divide-by-zero test here
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
6032 beq common_errDivideByZero 6033 .endif 6034 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6035 6036 adds r0, r0, r2 @ optional op; may set condition codes 6037 adc r1, r1, r3 @ result<- op, r0-r3 changed 6038 GET_INST_OPCODE(ip) @ extract opcode from rINST 6039 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6040 GOTO_OPCODE(ip) @ jump to next instruction 6041 /* 12-15 instructions */ 6042 6043 6044 6045/* ------------------------------ */ 6046 .balign 64 6047.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 6048/* File: armv5te/OP_SUB_LONG_2ADDR.S */ 6049/* File: armv5te/binopWide2addr.S */ 6050 /* 6051 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6052 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6053 * This could be an ARM instruction or a function call. (If the result 6054 * comes back in a register other than r0, you can override "result".) 6055 * 6056 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6057 * vCC (r1). Useful for integer division and modulus. 6058 * 6059 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6060 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6061 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6062 * rem-double/2addr 6063 */ 6064 /* binop/2addr vA, vB */ 6065 mov r9, rINST, lsr #8 @ r9<- A+ 6066 mov r1, rINST, lsr #12 @ r1<- B 6067 and r9, r9, #15 6068 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6069 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6070 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6071 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6072 .if 0 6073 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6074 beq common_errDivideByZero 6075 .endif 6076 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6077 6078 subs r0, r0, r2 @ optional op; may set condition codes 6079 sbc r1, r1, r3 @ result<- op, r0-r3 changed 6080 GET_INST_OPCODE(ip) @ extract opcode from rINST 6081 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6082 GOTO_OPCODE(ip) @ jump to next instruction 6083 /* 12-15 instructions */ 6084 6085 6086 6087/* ------------------------------ */ 6088 .balign 64 6089.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 6090/* File: armv5te/OP_MUL_LONG_2ADDR.S */ 6091 /* 6092 * Signed 64-bit integer multiply, "/2addr" version. 6093 * 6094 * See OP_MUL_LONG for an explanation. 6095 * 6096 * We get a little tight on registers, so to avoid looking up &fp[A] 6097 * again we stuff it into rINST. 6098 */ 6099 /* mul-long/2addr vA, vB */ 6100 mov r9, rINST, lsr #8 @ r9<- A+ 6101 mov r1, rINST, lsr #12 @ r1<- B 6102 and r9, r9, #15 6103 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6104 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 6105 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6106 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 6107 mul ip, r2, r1 @ ip<- ZxW 6108 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 6109 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 6110 mov r0, rINST @ r0<- &fp[A] (free up rINST) 6111 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6112 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 6113 GET_INST_OPCODE(ip) @ extract opcode from rINST 6114 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 6115 GOTO_OPCODE(ip) @ jump to next instruction 6116 6117 6118/* ------------------------------ */ 6119 .balign 64 6120.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 6121/* File: armv5te/OP_DIV_LONG_2ADDR.S */ 6122/* File: armv5te/binopWide2addr.S */ 6123 /* 6124 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6125 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6126 * This could be an ARM instruction or a function call. 
(If the result 6127 * comes back in a register other than r0, you can override "result".) 6128 * 6129 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6130 * vCC (r1). Useful for integer division and modulus. 6131 * 6132 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6133 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6134 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6135 * rem-double/2addr 6136 */ 6137 /* binop/2addr vA, vB */ 6138 mov r9, rINST, lsr #8 @ r9<- A+ 6139 mov r1, rINST, lsr #12 @ r1<- B 6140 and r9, r9, #15 6141 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6142 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6143 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6144 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6145 .if 1 6146 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6147 beq common_errDivideByZero 6148 .endif 6149 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6150 6151 @ optional op; may set condition codes 6152 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 6153 GET_INST_OPCODE(ip) @ extract opcode from rINST 6154 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6155 GOTO_OPCODE(ip) @ jump to next instruction 6156 /* 12-15 instructions */ 6157 6158 6159 6160/* ------------------------------ */ 6161 .balign 64 6162.L_OP_REM_LONG_2ADDR: /* 0xbf */ 6163/* File: armv5te/OP_REM_LONG_2ADDR.S */ 6164/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 6165/* File: armv5te/binopWide2addr.S */ 6166 /* 6167 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6168 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6169 * This could be an ARM instruction or a function call. (If the result 6170 * comes back in a register other than r0, you can override "result".) 6171 * 6172 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6173 * vCC (r1). Useful for integer division and modulus. 
6174 * 6175 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6176 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6177 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6178 * rem-double/2addr 6179 */ 6180 /* binop/2addr vA, vB */ 6181 mov r9, rINST, lsr #8 @ r9<- A+ 6182 mov r1, rINST, lsr #12 @ r1<- B 6183 and r9, r9, #15 6184 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6185 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6186 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6187 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6188 .if 1 6189 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6190 beq common_errDivideByZero 6191 .endif 6192 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6193 6194 @ optional op; may set condition codes 6195 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 6196 GET_INST_OPCODE(ip) @ extract opcode from rINST 6197 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 6198 GOTO_OPCODE(ip) @ jump to next instruction 6199 /* 12-15 instructions */ 6200 6201 6202 6203/* ------------------------------ */ 6204 .balign 64 6205.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 6206/* File: armv5te/OP_AND_LONG_2ADDR.S */ 6207/* File: armv5te/binopWide2addr.S */ 6208 /* 6209 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6210 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6211 * This could be an ARM instruction or a function call. (If the result 6212 * comes back in a register other than r0, you can override "result".) 6213 * 6214 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6215 * vCC (r1). Useful for integer division and modulus. 
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ "chkzero" from the binopWide2addr template
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ optional op; may set condition codes
    and     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG_2ADDR: /* 0xc1 */
/* File: armv5te/OP_OR_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ "chkzero" from the binopWide2addr template
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ optional op; may set condition codes
    orr     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
/* File: armv5te/OP_XOR_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ "chkzero" from the binopWide2addr template
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
/* File: armv5te/OP_SHL_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
     * 32-bit shift distance.
     */
    /* shl-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    b       .LOP_SHL_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
/* File: armv5te/OP_SHR_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
     * 32-bit shift distance.
     */
    /* shr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    mov     r1, r1, asr r2              @ r1<- r1 >> r2
    b       .LOP_SHR_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
/* File: armv5te/OP_USHR_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
     * 32-bit shift distance.
     */
    /* ushr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
    b       .LOP_USHR_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
/* File: armv5te/OP_ADD_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ "chkzero" from the binop2addr template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fadd                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
/* File: armv5te/OP_SUB_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ "chkzero" from the binop2addr template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fsub                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
/* File: armv5te/OP_MUL_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ "chkzero" from the binop2addr template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fmul                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
/* File: armv5te/OP_DIV_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ "chkzero" from the binop2addr template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fdiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT_2ADDR: /* 0xca */
/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ "chkzero" from the binop2addr template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
/* File: armv5te/OP_ADD_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ "chkzero" from the binopWide2addr template
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dadd                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
/* File: armv5te/OP_SUB_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ "chkzero" from the binopWide2addr template
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dsub                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
/* File: armv5te/OP_MUL_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ "chkzero" from the binopWide2addr template
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dmul                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
/* File: armv5te/OP_DIV_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ "chkzero" from the binopWide2addr template
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ddiv                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ "chkzero" from the binopWide2addr template
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT16: /* 0xd0 */
/* File: armv5te/OP_ADD_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    .if 0                               @ "chkzero" from the binopLit16 template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT: /* 0xd1 */
/* File: armv5te/OP_RSUB_INT.S */
/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    .if 0                               @ "chkzero" from the binopLit16 template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    rsb     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT16: /* 0xd2 */
/* File: armv5te/OP_MUL_INT_LIT16.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    .if 0                               @ "chkzero" from the binopLit16 template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT16: /* 0xd3 */
/* File: armv5te/OP_DIV_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.
     * Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    .if 1                               @ "chkzero" enabled for integer division
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT16: /* 0xd4 */
/* File: armv5te/OP_REM_INT_LIT16.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    .if 1                               @ "chkzero" enabled for integer modulus
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT16: /* 0xd5 */
/* File: armv5te/OP_AND_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    .if 0                               @ "chkzero" from the binopLit16 template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT16: /* 0xd6 */
/* File: armv5te/OP_OR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    .if 0                               @ "chkzero" from the binopLit16 template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT16: /* 0xd7 */
/* File: armv5te/OP_XOR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A
    .if 0                               @ "chkzero" from the binopLit16 template
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT8: /* 0xd8 */
/* File: armv5te/OP_ADD_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ "chkzero"; beq uses Z from 'movs' above
    @cmp     r1, #0                     @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT_LIT8: /* 0xd9 */
/* File: armv5te/OP_RSUB_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ "chkzero"; beq uses Z from 'movs' above
    @cmp     r1, #0                     @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    rsb     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT8: /* 0xda */
/* File: armv5te/OP_MUL_INT_LIT8.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ "chkzero"; beq uses Z from 'movs' above
    @cmp     r1, #0                     @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT8: /* 0xdb */
/* File: armv5te/OP_DIV_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 1                               @ "chkzero"; beq uses Z from 'movs' above
    @cmp     r1, #0                     @ is second operand zero?
    beq     common_errDivideByZero      @ vCC==0: Z was set by the earlier
                                        @ "movs r1, r3, asr #8", so no cmp needed
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT8: /* 0xdc */
/* File: armv5te/OP_REM_INT_LIT8.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 1
    @cmp    r1, #0                      @ is second operand zero?
                                        @ (redundant: "movs" above set Z already,
                                        @ which the following beq consumes)
7302 beq common_errDivideByZero 7303 .endif 7304 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7305 7306 @ optional op; may set condition codes 7307 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 7308 GET_INST_OPCODE(ip) @ extract opcode from rINST 7309 SET_VREG(r1, r9) @ vAA<- r1 7310 GOTO_OPCODE(ip) @ jump to next instruction 7311 /* 10-12 instructions */ 7312 7313 7314 7315/* ------------------------------ */ 7316 .balign 64 7317.L_OP_AND_INT_LIT8: /* 0xdd */ 7318/* File: armv5te/OP_AND_INT_LIT8.S */ 7319/* File: armv5te/binopLit8.S */ 7320 /* 7321 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7322 * that specifies an instruction that performs "result = r0 op r1". 7323 * This could be an ARM instruction or a function call. (If the result 7324 * comes back in a register other than r0, you can override "result".) 7325 * 7326 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7327 * vCC (r1). Useful for integer division and modulus. 7328 * 7329 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7330 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7331 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7332 */ 7333 /* binop/lit8 vAA, vBB, #+CC */ 7334 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7335 mov r9, rINST, lsr #8 @ r9<- AA 7336 and r2, r3, #255 @ r2<- BB 7337 GET_VREG(r0, r2) @ r0<- vBB 7338 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7339 .if 0 7340 @cmp r1, #0 @ is second operand zero? 
7341 beq common_errDivideByZero 7342 .endif 7343 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7344 7345 @ optional op; may set condition codes 7346 and r0, r0, r1 @ r0<- op, r0-r3 changed 7347 GET_INST_OPCODE(ip) @ extract opcode from rINST 7348 SET_VREG(r0, r9) @ vAA<- r0 7349 GOTO_OPCODE(ip) @ jump to next instruction 7350 /* 10-12 instructions */ 7351 7352 7353 7354/* ------------------------------ */ 7355 .balign 64 7356.L_OP_OR_INT_LIT8: /* 0xde */ 7357/* File: armv5te/OP_OR_INT_LIT8.S */ 7358/* File: armv5te/binopLit8.S */ 7359 /* 7360 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7361 * that specifies an instruction that performs "result = r0 op r1". 7362 * This could be an ARM instruction or a function call. (If the result 7363 * comes back in a register other than r0, you can override "result".) 7364 * 7365 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7366 * vCC (r1). Useful for integer division and modulus. 7367 * 7368 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7369 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7370 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7371 */ 7372 /* binop/lit8 vAA, vBB, #+CC */ 7373 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7374 mov r9, rINST, lsr #8 @ r9<- AA 7375 and r2, r3, #255 @ r2<- BB 7376 GET_VREG(r0, r2) @ r0<- vBB 7377 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7378 .if 0 7379 @cmp r1, #0 @ is second operand zero? 
7380 beq common_errDivideByZero 7381 .endif 7382 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7383 7384 @ optional op; may set condition codes 7385 orr r0, r0, r1 @ r0<- op, r0-r3 changed 7386 GET_INST_OPCODE(ip) @ extract opcode from rINST 7387 SET_VREG(r0, r9) @ vAA<- r0 7388 GOTO_OPCODE(ip) @ jump to next instruction 7389 /* 10-12 instructions */ 7390 7391 7392 7393/* ------------------------------ */ 7394 .balign 64 7395.L_OP_XOR_INT_LIT8: /* 0xdf */ 7396/* File: armv5te/OP_XOR_INT_LIT8.S */ 7397/* File: armv5te/binopLit8.S */ 7398 /* 7399 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7400 * that specifies an instruction that performs "result = r0 op r1". 7401 * This could be an ARM instruction or a function call. (If the result 7402 * comes back in a register other than r0, you can override "result".) 7403 * 7404 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7405 * vCC (r1). Useful for integer division and modulus. 7406 * 7407 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7408 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7409 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7410 */ 7411 /* binop/lit8 vAA, vBB, #+CC */ 7412 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7413 mov r9, rINST, lsr #8 @ r9<- AA 7414 and r2, r3, #255 @ r2<- BB 7415 GET_VREG(r0, r2) @ r0<- vBB 7416 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7417 .if 0 7418 @cmp r1, #0 @ is second operand zero? 
7419 beq common_errDivideByZero 7420 .endif 7421 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7422 7423 @ optional op; may set condition codes 7424 eor r0, r0, r1 @ r0<- op, r0-r3 changed 7425 GET_INST_OPCODE(ip) @ extract opcode from rINST 7426 SET_VREG(r0, r9) @ vAA<- r0 7427 GOTO_OPCODE(ip) @ jump to next instruction 7428 /* 10-12 instructions */ 7429 7430 7431 7432/* ------------------------------ */ 7433 .balign 64 7434.L_OP_SHL_INT_LIT8: /* 0xe0 */ 7435/* File: armv5te/OP_SHL_INT_LIT8.S */ 7436/* File: armv5te/binopLit8.S */ 7437 /* 7438 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7439 * that specifies an instruction that performs "result = r0 op r1". 7440 * This could be an ARM instruction or a function call. (If the result 7441 * comes back in a register other than r0, you can override "result".) 7442 * 7443 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7444 * vCC (r1). Useful for integer division and modulus. 7445 * 7446 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7447 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7448 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7449 */ 7450 /* binop/lit8 vAA, vBB, #+CC */ 7451 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7452 mov r9, rINST, lsr #8 @ r9<- AA 7453 and r2, r3, #255 @ r2<- BB 7454 GET_VREG(r0, r2) @ r0<- vBB 7455 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7456 .if 0 7457 @cmp r1, #0 @ is second operand zero? 
7458 beq common_errDivideByZero 7459 .endif 7460 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7461 7462 and r1, r1, #31 @ optional op; may set condition codes 7463 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 7464 GET_INST_OPCODE(ip) @ extract opcode from rINST 7465 SET_VREG(r0, r9) @ vAA<- r0 7466 GOTO_OPCODE(ip) @ jump to next instruction 7467 /* 10-12 instructions */ 7468 7469 7470 7471/* ------------------------------ */ 7472 .balign 64 7473.L_OP_SHR_INT_LIT8: /* 0xe1 */ 7474/* File: armv5te/OP_SHR_INT_LIT8.S */ 7475/* File: armv5te/binopLit8.S */ 7476 /* 7477 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7478 * that specifies an instruction that performs "result = r0 op r1". 7479 * This could be an ARM instruction or a function call. (If the result 7480 * comes back in a register other than r0, you can override "result".) 7481 * 7482 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7483 * vCC (r1). Useful for integer division and modulus. 7484 * 7485 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7486 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7487 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7488 */ 7489 /* binop/lit8 vAA, vBB, #+CC */ 7490 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7491 mov r9, rINST, lsr #8 @ r9<- AA 7492 and r2, r3, #255 @ r2<- BB 7493 GET_VREG(r0, r2) @ r0<- vBB 7494 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7495 .if 0 7496 @cmp r1, #0 @ is second operand zero? 
7497 beq common_errDivideByZero 7498 .endif 7499 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7500 7501 and r1, r1, #31 @ optional op; may set condition codes 7502 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 7503 GET_INST_OPCODE(ip) @ extract opcode from rINST 7504 SET_VREG(r0, r9) @ vAA<- r0 7505 GOTO_OPCODE(ip) @ jump to next instruction 7506 /* 10-12 instructions */ 7507 7508 7509 7510/* ------------------------------ */ 7511 .balign 64 7512.L_OP_USHR_INT_LIT8: /* 0xe2 */ 7513/* File: armv5te/OP_USHR_INT_LIT8.S */ 7514/* File: armv5te/binopLit8.S */ 7515 /* 7516 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7517 * that specifies an instruction that performs "result = r0 op r1". 7518 * This could be an ARM instruction or a function call. (If the result 7519 * comes back in a register other than r0, you can override "result".) 7520 * 7521 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7522 * vCC (r1). Useful for integer division and modulus. 7523 * 7524 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7525 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7526 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7527 */ 7528 /* binop/lit8 vAA, vBB, #+CC */ 7529 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7530 mov r9, rINST, lsr #8 @ r9<- AA 7531 and r2, r3, #255 @ r2<- BB 7532 GET_VREG(r0, r2) @ r0<- vBB 7533 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7534 .if 0 7535 @cmp r1, #0 @ is second operand zero? 
7536 beq common_errDivideByZero 7537 .endif 7538 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7539 7540 and r1, r1, #31 @ optional op; may set condition codes 7541 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 7542 GET_INST_OPCODE(ip) @ extract opcode from rINST 7543 SET_VREG(r0, r9) @ vAA<- r0 7544 GOTO_OPCODE(ip) @ jump to next instruction 7545 /* 10-12 instructions */ 7546 7547 7548 7549/* ------------------------------ */ 7550 .balign 64 7551.L_OP_UNUSED_E3: /* 0xe3 */ 7552/* File: armv5te/OP_UNUSED_E3.S */ 7553/* File: armv5te/unused.S */ 7554 bl common_abort 7555 7556 7557 7558/* ------------------------------ */ 7559 .balign 64 7560.L_OP_UNUSED_E4: /* 0xe4 */ 7561/* File: armv5te/OP_UNUSED_E4.S */ 7562/* File: armv5te/unused.S */ 7563 bl common_abort 7564 7565 7566 7567/* ------------------------------ */ 7568 .balign 64 7569.L_OP_UNUSED_E5: /* 0xe5 */ 7570/* File: armv5te/OP_UNUSED_E5.S */ 7571/* File: armv5te/unused.S */ 7572 bl common_abort 7573 7574 7575 7576/* ------------------------------ */ 7577 .balign 64 7578.L_OP_UNUSED_E6: /* 0xe6 */ 7579/* File: armv5te/OP_UNUSED_E6.S */ 7580/* File: armv5te/unused.S */ 7581 bl common_abort 7582 7583 7584 7585/* ------------------------------ */ 7586 .balign 64 7587.L_OP_UNUSED_E7: /* 0xe7 */ 7588/* File: armv5te/OP_UNUSED_E7.S */ 7589/* File: armv5te/unused.S */ 7590 bl common_abort 7591 7592 7593 7594/* ------------------------------ */ 7595 .balign 64 7596.L_OP_UNUSED_E8: /* 0xe8 */ 7597/* File: armv5te/OP_UNUSED_E8.S */ 7598/* File: armv5te/unused.S */ 7599 bl common_abort 7600 7601 7602 7603/* ------------------------------ */ 7604 .balign 64 7605.L_OP_UNUSED_E9: /* 0xe9 */ 7606/* File: armv5te/OP_UNUSED_E9.S */ 7607/* File: armv5te/unused.S */ 7608 bl common_abort 7609 7610 7611 7612/* ------------------------------ */ 7613 .balign 64 7614.L_OP_UNUSED_EA: /* 0xea */ 7615/* File: armv5te/OP_UNUSED_EA.S */ 7616/* File: armv5te/unused.S */ 7617 bl common_abort 7618 7619 7620 7621/* 
------------------------------ */
    .balign 64
.L_OP_UNUSED_EB: /* 0xeb */
/* File: armv5te/OP_UNUSED_EB.S */
/* File: armv5te/unused.S */
    bl      common_abort        @ unassigned opcode slot; must never execute


/* ------------------------------ */
    .balign 64
.L_OP_BREAKPOINT: /* 0xec */
/* File: armv5te/OP_BREAKPOINT.S */
/* File: armv5te/unused.S */
    bl      common_abort        @ uses the unused.S stub: aborts if reached


/* ------------------------------ */
    .balign 64
.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */
    /*
     * Handle a throw-verification-error instruction.  This throws an
     * exception for an error discovered during verification.  The
     * exception is indicated by AA, with some detail provided by BBBB.
     */
    /* op AA, ref@BBBB */
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    FETCH(r2, 1)                @ r2<- BBBB
    EXPORT_PC()                 @ export the PC (required before any throw)
    mov     r1, rINST, lsr #8   @ r1<- AA
    bl      dvmThrowVerificationError   @ always throws
    b       common_exceptionThrown      @ handle exception

/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE: /* 0xee */
/* File: armv5te/OP_EXECUTE_INLINE.S */
    /*
     * Execute a "native inline" instruction.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
7670 */ 7671 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */ 7672 FETCH(r10, 1) @ r10<- BBBB 7673 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7674 EXPORT_PC() @ can throw 7675 sub sp, sp, #8 @ make room for arg, +64 bit align 7676 mov r0, rINST, lsr #12 @ r0<- B 7677 str r1, [sp] @ push &glue->retval 7678 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after 7679 add sp, sp, #8 @ pop stack 7680 cmp r0, #0 @ test boolean result of inline 7681 beq common_exceptionThrown @ returned false, handle exception 7682 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7683 GET_INST_OPCODE(ip) @ extract opcode from rINST 7684 GOTO_OPCODE(ip) @ jump to next instruction 7685 7686/* ------------------------------ */ 7687 .balign 64 7688.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */ 7689/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */ 7690 /* 7691 * Execute a "native inline" instruction, using "/range" semantics. 7692 * Same idea as execute-inline, but we get the args differently. 7693 * 7694 * We need to call an InlineOp4Func: 7695 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7696 * 7697 * The first four args are in r0-r3, pointer to return value storage 7698 * is on the stack. The function's return value is a flag that tells 7699 * us if an exception was thrown. 
7700 */ 7701 /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */ 7702 FETCH(r10, 1) @ r10<- BBBB 7703 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7704 EXPORT_PC() @ can throw 7705 sub sp, sp, #8 @ make room for arg, +64 bit align 7706 mov r0, rINST, lsr #8 @ r0<- AA 7707 str r1, [sp] @ push &glue->retval 7708 bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after 7709 add sp, sp, #8 @ pop stack 7710 cmp r0, #0 @ test boolean result of inline 7711 beq common_exceptionThrown @ returned false, handle exception 7712 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7713 GET_INST_OPCODE(ip) @ extract opcode from rINST 7714 GOTO_OPCODE(ip) @ jump to next instruction 7715 7716/* ------------------------------ */ 7717 .balign 64 7718.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7719/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7720 /* 7721 * invoke-direct-empty is a no-op in a "standard" interpreter. 7722 */ 7723 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7724 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7725 GOTO_OPCODE(ip) @ execute it 7726 7727/* ------------------------------ */ 7728 .balign 64 7729.L_OP_UNUSED_F1: /* 0xf1 */ 7730/* File: armv5te/OP_UNUSED_F1.S */ 7731/* File: armv5te/unused.S */ 7732 bl common_abort 7733 7734 7735 7736/* ------------------------------ */ 7737 .balign 64 7738.L_OP_IGET_QUICK: /* 0xf2 */ 7739/* File: armv5te/OP_IGET_QUICK.S */ 7740 /* For: iget-quick, iget-object-quick */ 7741 /* op vA, vB, offset@CCCC */ 7742 mov r2, rINST, lsr #12 @ r2<- B 7743 GET_VREG(r3, r2) @ r3<- object we're operating on 7744 FETCH(r1, 1) @ r1<- field byte offset 7745 cmp r3, #0 @ check object for null 7746 mov r2, rINST, lsr #8 @ r2<- A(+) 7747 beq common_errNullObject @ object was null 7748 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7749 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7750 and r2, r2, #15 7751 GET_INST_OPCODE(ip) @ extract opcode from rINST 7752 SET_VREG(r0, r2) @ fp[A]<- r0 7753 
GOTO_OPCODE(ip) @ jump to next instruction 7754 7755 7756/* ------------------------------ */ 7757 .balign 64 7758.L_OP_IGET_WIDE_QUICK: /* 0xf3 */ 7759/* File: armv5te/OP_IGET_WIDE_QUICK.S */ 7760 /* iget-wide-quick vA, vB, offset@CCCC */ 7761 mov r2, rINST, lsr #12 @ r2<- B 7762 GET_VREG(r3, r2) @ r3<- object we're operating on 7763 FETCH(r1, 1) @ r1<- field byte offset 7764 cmp r3, #0 @ check object for null 7765 mov r2, rINST, lsr #8 @ r2<- A(+) 7766 beq common_errNullObject @ object was null 7767 ldrd r0, [r3, r1] @ r0<- obj.field (64 bits, aligned) 7768 and r2, r2, #15 7769 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7770 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 7771 GET_INST_OPCODE(ip) @ extract opcode from rINST 7772 stmia r3, {r0-r1} @ fp[A]<- r0/r1 7773 GOTO_OPCODE(ip) @ jump to next instruction 7774 7775 7776/* ------------------------------ */ 7777 .balign 64 7778.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */ 7779/* File: armv5te/OP_IGET_OBJECT_QUICK.S */ 7780/* File: armv5te/OP_IGET_QUICK.S */ 7781 /* For: iget-quick, iget-object-quick */ 7782 /* op vA, vB, offset@CCCC */ 7783 mov r2, rINST, lsr #12 @ r2<- B 7784 GET_VREG(r3, r2) @ r3<- object we're operating on 7785 FETCH(r1, 1) @ r1<- field byte offset 7786 cmp r3, #0 @ check object for null 7787 mov r2, rINST, lsr #8 @ r2<- A(+) 7788 beq common_errNullObject @ object was null 7789 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7790 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7791 and r2, r2, #15 7792 GET_INST_OPCODE(ip) @ extract opcode from rINST 7793 SET_VREG(r0, r2) @ fp[A]<- r0 7794 GOTO_OPCODE(ip) @ jump to next instruction 7795 7796 7797 7798/* ------------------------------ */ 7799 .balign 64 7800.L_OP_IPUT_QUICK: /* 0xf5 */ 7801/* File: armv5te/OP_IPUT_QUICK.S */ 7802 /* For: iput-quick, iput-object-quick */ 7803 /* op vA, vB, offset@CCCC */ 7804 mov r2, rINST, lsr #12 @ r2<- B 7805 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7806 FETCH(r1, 1) @ r1<- field byte offset 7807 cmp r3, #0 @ 
check object for null 7808 mov r2, rINST, lsr #8 @ r2<- A(+) 7809 beq common_errNullObject @ object was null 7810 and r2, r2, #15 7811 GET_VREG(r0, r2) @ r0<- fp[A] 7812 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7813 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7814 GET_INST_OPCODE(ip) @ extract opcode from rINST 7815 GOTO_OPCODE(ip) @ jump to next instruction 7816 7817 7818/* ------------------------------ */ 7819 .balign 64 7820.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7821/* File: armv5te/OP_IPUT_WIDE_QUICK.S */ 7822 /* iput-wide-quick vA, vB, offset@CCCC */ 7823 mov r0, rINST, lsr #8 @ r0<- A(+) 7824 mov r1, rINST, lsr #12 @ r1<- B 7825 and r0, r0, #15 7826 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7827 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7828 cmp r2, #0 @ check object for null 7829 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7830 beq common_errNullObject @ object was null 7831 FETCH(r3, 1) @ r3<- field byte offset 7832 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7833 strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1 7834 GET_INST_OPCODE(ip) @ extract opcode from rINST 7835 GOTO_OPCODE(ip) @ jump to next instruction 7836 7837 7838/* ------------------------------ */ 7839 .balign 64 7840.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7841/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7842/* File: armv5te/OP_IPUT_QUICK.S */ 7843 /* For: iput-quick, iput-object-quick */ 7844 /* op vA, vB, offset@CCCC */ 7845 mov r2, rINST, lsr #12 @ r2<- B 7846 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7847 FETCH(r1, 1) @ r1<- field byte offset 7848 cmp r3, #0 @ check object for null 7849 mov r2, rINST, lsr #8 @ r2<- A(+) 7850 beq common_errNullObject @ object was null 7851 and r2, r2, #15 7852 GET_VREG(r0, r2) @ r0<- fp[A] 7853 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7854 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7855 GET_INST_OPCODE(ip) @ extract opcode from rINST 7856 GOTO_OPCODE(ip) @ jump to next instruction 7857 7858 7859 7860/* 
------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!0)                        @ non-range form: keep only low nibble C
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB] (comment fixed: dest is r0, not r3)
    bl      common_invokeMethodNoRange  @ continue on

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!1)                        @ range form: r3 keeps the full CCCC
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB] (comment fixed: dest is r0, not r3)
    bl      common_invokeMethodRange    @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
     *
     * for: [opt] invoke-super-quick, invoke-super-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    .if     (!0)                        @ non-range form: keep only low nibble D
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    ldr     r2, [r2, #offClassObject_super]     @ r2<- method->clazz->super
    GET_VREG(r3, r10)                   @ r3<- "this"
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
    cmp     r3, #0                      @ null "this" ref?
    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
    beq     common_errNullObject        @ "this" is null, throw exception
    bl      common_invokeMethodNoRange  @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
7947 * 7948 * for: [opt] invoke-super-quick, invoke-super-quick/range 7949 */ 7950 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7951 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7952 FETCH(r10, 2) @ r10<- GFED or CCCC 7953 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7954 .if (!1) 7955 and r10, r10, #15 @ r10<- D (or stays CCCC) 7956 .endif 7957 FETCH(r1, 1) @ r1<- BBBB 7958 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7959 EXPORT_PC() @ must export for invoke 7960 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7961 GET_VREG(r3, r10) @ r3<- "this" 7962 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7963 cmp r3, #0 @ null "this" ref? 7964 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7965 beq common_errNullObject @ "this" is null, throw exception 7966 bl common_invokeMethodRange @ continue on 7967 7968 7969 7970/* ------------------------------ */ 7971 .balign 64 7972.L_OP_UNUSED_FC: /* 0xfc */ 7973/* File: armv5te/OP_UNUSED_FC.S */ 7974/* File: armv5te/unused.S */ 7975 bl common_abort 7976 7977 7978 7979/* ------------------------------ */ 7980 .balign 64 7981.L_OP_UNUSED_FD: /* 0xfd */ 7982/* File: armv5te/OP_UNUSED_FD.S */ 7983/* File: armv5te/unused.S */ 7984 bl common_abort 7985 7986 7987 7988/* ------------------------------ */ 7989 .balign 64 7990.L_OP_UNUSED_FE: /* 0xfe */ 7991/* File: armv5te/OP_UNUSED_FE.S */ 7992/* File: armv5te/unused.S */ 7993 bl common_abort 7994 7995 7996 7997/* ------------------------------ */ 7998 .balign 64 7999.L_OP_UNUSED_FF: /* 0xff */ 8000/* File: armv5te/OP_UNUSED_FF.S */ 8001/* File: armv5te/unused.S */ 8002 bl common_abort 8003 8004 8005 8006 8007 .balign 64 8008 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 8009 .global dvmAsmInstructionEnd 8010dvmAsmInstructionEnd: 8011 8012/* 8013 * =========================================================================== 8014 * Sister implementations 8015 * 
=========================================================================== 8016 */ 8017 .global dvmAsmSisterStart 8018 .type dvmAsmSisterStart, %function 8019 .text 8020 .balign 4 8021dvmAsmSisterStart: 8022 8023/* continuation for OP_CONST_STRING */ 8024 8025 /* 8026 * Continuation if the String has not yet been resolved. 8027 * r1: BBBB (String ref) 8028 * r9: target register 8029 */ 8030.LOP_CONST_STRING_resolve: 8031 EXPORT_PC() 8032 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8033 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8034 bl dvmResolveString @ r0<- String reference 8035 cmp r0, #0 @ failed? 8036 beq common_exceptionThrown @ yup, handle the exception 8037 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8038 GET_INST_OPCODE(ip) @ extract opcode from rINST 8039 SET_VREG(r0, r9) @ vAA<- r0 8040 GOTO_OPCODE(ip) @ jump to next instruction 8041 8042 8043/* continuation for OP_CONST_STRING_JUMBO */ 8044 8045 /* 8046 * Continuation if the String has not yet been resolved. 8047 * r1: BBBBBBBB (String ref) 8048 * r9: target register 8049 */ 8050.LOP_CONST_STRING_JUMBO_resolve: 8051 EXPORT_PC() 8052 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8053 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8054 bl dvmResolveString @ r0<- String reference 8055 cmp r0, #0 @ failed? 8056 beq common_exceptionThrown @ yup, handle the exception 8057 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 8058 GET_INST_OPCODE(ip) @ extract opcode from rINST 8059 SET_VREG(r0, r9) @ vAA<- r0 8060 GOTO_OPCODE(ip) @ jump to next instruction 8061 8062 8063/* continuation for OP_CONST_CLASS */ 8064 8065 /* 8066 * Continuation if the Class has not yet been resolved. 
8067 * r1: BBBB (Class ref) 8068 * r9: target register 8069 */ 8070.LOP_CONST_CLASS_resolve: 8071 EXPORT_PC() 8072 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8073 mov r2, #1 @ r2<- true 8074 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8075 bl dvmResolveClass @ r0<- Class reference 8076 cmp r0, #0 @ failed? 8077 beq common_exceptionThrown @ yup, handle the exception 8078 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8079 GET_INST_OPCODE(ip) @ extract opcode from rINST 8080 SET_VREG(r0, r9) @ vAA<- r0 8081 GOTO_OPCODE(ip) @ jump to next instruction 8082 8083 8084/* continuation for OP_CHECK_CAST */ 8085 8086 /* 8087 * Trivial test failed, need to perform full check. This is common. 8088 * r0 holds obj->clazz 8089 * r1 holds class resolved from BBBB 8090 * r9 holds object 8091 */ 8092.LOP_CHECK_CAST_fullcheck: 8093 bl dvmInstanceofNonTrivial @ r0<- boolean result 8094 cmp r0, #0 @ failed? 8095 bne .LOP_CHECK_CAST_okay @ no, success 8096 8097 @ A cast has failed. We need to throw a ClassCastException with the 8098 @ class of the object that failed to be cast. 8099 EXPORT_PC() @ about to throw 8100 ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz 8101 ldr r0, .LstrClassCastExceptionPtr 8102 ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor 8103 bl dvmThrowExceptionWithClassMessage 8104 b common_exceptionThrown 8105 8106 /* 8107 * Resolution required. This is the least-likely path. 8108 * 8109 * r2 holds BBBB 8110 * r9 holds object 8111 */ 8112.LOP_CHECK_CAST_resolve: 8113 EXPORT_PC() @ resolve() could throw 8114 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8115 mov r1, r2 @ r1<- BBBB 8116 mov r2, #0 @ r2<- false 8117 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8118 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8119 cmp r0, #0 @ got null? 
8120 beq common_exceptionThrown @ yes, handle exception 8121 mov r1, r0 @ r1<- class resolved from BBB 8122 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8123 b .LOP_CHECK_CAST_resolved @ pick up where we left off 8124 8125.LstrClassCastExceptionPtr: 8126 .word .LstrClassCastException 8127 8128 8129/* continuation for OP_INSTANCE_OF */ 8130 8131 /* 8132 * Trivial test failed, need to perform full check. This is common. 8133 * r0 holds obj->clazz 8134 * r1 holds class resolved from BBBB 8135 * r9 holds A 8136 */ 8137.LOP_INSTANCE_OF_fullcheck: 8138 bl dvmInstanceofNonTrivial @ r0<- boolean result 8139 @ fall through to OP_INSTANCE_OF_store 8140 8141 /* 8142 * r0 holds boolean result 8143 * r9 holds A 8144 */ 8145.LOP_INSTANCE_OF_store: 8146 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8147 SET_VREG(r0, r9) @ vA<- r0 8148 GET_INST_OPCODE(ip) @ extract opcode from rINST 8149 GOTO_OPCODE(ip) @ jump to next instruction 8150 8151 /* 8152 * Trivial test succeeded, save and bail. 8153 * r9 holds A 8154 */ 8155.LOP_INSTANCE_OF_trivial: 8156 mov r0, #1 @ indicate success 8157 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper 8158 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8159 SET_VREG(r0, r9) @ vA<- r0 8160 GET_INST_OPCODE(ip) @ extract opcode from rINST 8161 GOTO_OPCODE(ip) @ jump to next instruction 8162 8163 /* 8164 * Resolution required. This is the least-likely path. 8165 * 8166 * r3 holds BBBB 8167 * r9 holds A 8168 */ 8169.LOP_INSTANCE_OF_resolve: 8170 EXPORT_PC() @ resolve() could throw 8171 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8172 mov r1, r3 @ r1<- BBBB 8173 mov r2, #1 @ r2<- true 8174 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8175 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8176 cmp r0, #0 @ got null? 
8177 beq common_exceptionThrown @ yes, handle exception 8178 mov r1, r0 @ r1<- class resolved from BBB 8179 mov r3, rINST, lsr #12 @ r3<- B 8180 GET_VREG(r0, r3) @ r0<- vB (object) 8181 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 8182 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 8183 8184 8185/* continuation for OP_NEW_INSTANCE */ 8186 8187 .balign 32 @ minimize cache lines 8188.LOP_NEW_INSTANCE_finish: @ r0=new object 8189 mov r3, rINST, lsr #8 @ r3<- AA 8190 cmp r0, #0 @ failed? 8191 beq common_exceptionThrown @ yes, handle the exception 8192 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8193 GET_INST_OPCODE(ip) @ extract opcode from rINST 8194 SET_VREG(r0, r3) @ vAA<- r0 8195 GOTO_OPCODE(ip) @ jump to next instruction 8196 8197 /* 8198 * Class initialization required. 8199 * 8200 * r0 holds class object 8201 */ 8202.LOP_NEW_INSTANCE_needinit: 8203 mov r9, r0 @ save r0 8204 bl dvmInitClass @ initialize class 8205 cmp r0, #0 @ check boolean result 8206 mov r0, r9 @ restore r0 8207 bne .LOP_NEW_INSTANCE_initialized @ success, continue 8208 b common_exceptionThrown @ failed, deal with init exception 8209 8210 /* 8211 * Resolution required. This is the least-likely path. 8212 * 8213 * r1 holds BBBB 8214 */ 8215.LOP_NEW_INSTANCE_resolve: 8216 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8217 mov r2, #0 @ r2<- false 8218 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8219 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8220 cmp r0, #0 @ got null? 8221 bne .LOP_NEW_INSTANCE_resolved @ no, continue 8222 b common_exceptionThrown @ yes, handle exception 8223 8224.LstrInstantiationErrorPtr: 8225 .word .LstrInstantiationError 8226 8227 8228/* continuation for OP_NEW_ARRAY */ 8229 8230 8231 /* 8232 * Resolve class. (This is an uncommon case.) 
8233 * 8234 * r1 holds array length 8235 * r2 holds class ref CCCC 8236 */ 8237.LOP_NEW_ARRAY_resolve: 8238 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8239 mov r9, r1 @ r9<- length (save) 8240 mov r1, r2 @ r1<- CCCC 8241 mov r2, #0 @ r2<- false 8242 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8243 bl dvmResolveClass @ r0<- call(clazz, ref) 8244 cmp r0, #0 @ got null? 8245 mov r1, r9 @ r1<- length (restore) 8246 beq common_exceptionThrown @ yes, handle exception 8247 @ fall through to OP_NEW_ARRAY_finish 8248 8249 /* 8250 * Finish allocation. 8251 * 8252 * r0 holds class 8253 * r1 holds array length 8254 */ 8255.LOP_NEW_ARRAY_finish: 8256 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 8257 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 8258 cmp r0, #0 @ failed? 8259 mov r2, rINST, lsr #8 @ r2<- A+ 8260 beq common_exceptionThrown @ yes, handle the exception 8261 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8262 and r2, r2, #15 @ r2<- A 8263 GET_INST_OPCODE(ip) @ extract opcode from rINST 8264 SET_VREG(r0, r2) @ vA<- r0 8265 GOTO_OPCODE(ip) @ jump to next instruction 8266 8267 8268/* continuation for OP_FILLED_NEW_ARRAY */ 8269 8270 /* 8271 * On entry: 8272 * r0 holds array class 8273 * r10 holds AA or BA 8274 */ 8275.LOP_FILLED_NEW_ARRAY_continue: 8276 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8277 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8278 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8279 .if 0 8280 mov r1, r10 @ r1<- AA (length) 8281 .else 8282 mov r1, r10, lsr #4 @ r1<- B (length) 8283 .endif 8284 cmp r3, #'I' @ array of ints? 8285 cmpne r3, #'L' @ array of objects? 8286 cmpne r3, #'[' @ array of arrays? 8287 mov r9, r1 @ save length in r9 8288 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 8289 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8290 cmp r0, #0 @ null return? 
8291 beq common_exceptionThrown @ alloc failed, handle exception 8292 8293 FETCH(r1, 2) @ r1<- FEDC or CCCC 8294 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8295 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8296 subs r9, r9, #1 @ length--, check for neg 8297 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8298 bmi 2f @ was zero, bail 8299 8300 @ copy values from registers into the array 8301 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8302 .if 0 8303 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 83041: ldr r3, [r2], #4 @ r3<- *r2++ 8305 subs r9, r9, #1 @ count-- 8306 str r3, [r0], #4 @ *contents++ = vX 8307 bpl 1b 8308 @ continue at 2 8309 .else 8310 cmp r9, #4 @ length was initially 5? 8311 and r2, r10, #15 @ r2<- A 8312 bne 1f @ <= 4 args, branch 8313 GET_VREG(r3, r2) @ r3<- vA 8314 sub r9, r9, #1 @ count-- 8315 str r3, [r0, #16] @ contents[4] = vA 83161: and r2, r1, #15 @ r2<- F/E/D/C 8317 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8318 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8319 subs r9, r9, #1 @ count-- 8320 str r3, [r0], #4 @ *contents++ = vX 8321 bpl 1b 8322 @ continue at 2 8323 .endif 8324 83252: 8326 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8327 GOTO_OPCODE(ip) @ execute it 8328 8329 /* 8330 * Throw an exception indicating that we have not implemented this 8331 * mode of filled-new-array. 
8332 */ 8333.LOP_FILLED_NEW_ARRAY_notimpl: 8334 ldr r0, .L_strInternalError 8335 ldr r1, .L_strFilledNewArrayNotImpl 8336 bl dvmThrowException 8337 b common_exceptionThrown 8338 8339 .if (!0) @ define in one or the other, not both 8340.L_strFilledNewArrayNotImpl: 8341 .word .LstrFilledNewArrayNotImpl 8342.L_strInternalError: 8343 .word .LstrInternalError 8344 .endif 8345 8346 8347/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8348 8349 /* 8350 * On entry: 8351 * r0 holds array class 8352 * r10 holds AA or BA 8353 */ 8354.LOP_FILLED_NEW_ARRAY_RANGE_continue: 8355 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8356 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8357 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8358 .if 1 8359 mov r1, r10 @ r1<- AA (length) 8360 .else 8361 mov r1, r10, lsr #4 @ r1<- B (length) 8362 .endif 8363 cmp r3, #'I' @ array of ints? 8364 cmpne r3, #'L' @ array of objects? 8365 cmpne r3, #'[' @ array of arrays? 8366 mov r9, r1 @ save length in r9 8367 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8368 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8369 cmp r0, #0 @ null return? 8370 beq common_exceptionThrown @ alloc failed, handle exception 8371 8372 FETCH(r1, 2) @ r1<- FEDC or CCCC 8373 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8374 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8375 subs r9, r9, #1 @ length--, check for neg 8376 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8377 bmi 2f @ was zero, bail 8378 8379 @ copy values from registers into the array 8380 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8381 .if 1 8382 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 83831: ldr r3, [r2], #4 @ r3<- *r2++ 8384 subs r9, r9, #1 @ count-- 8385 str r3, [r0], #4 @ *contents++ = vX 8386 bpl 1b 8387 @ continue at 2 8388 .else 8389 cmp r9, #4 @ length was initially 5? 
8390 and r2, r10, #15 @ r2<- A 8391 bne 1f @ <= 4 args, branch 8392 GET_VREG(r3, r2) @ r3<- vA 8393 sub r9, r9, #1 @ count-- 8394 str r3, [r0, #16] @ contents[4] = vA 83951: and r2, r1, #15 @ r2<- F/E/D/C 8396 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8397 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8398 subs r9, r9, #1 @ count-- 8399 str r3, [r0], #4 @ *contents++ = vX 8400 bpl 1b 8401 @ continue at 2 8402 .endif 8403 84042: 8405 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8406 GOTO_OPCODE(ip) @ execute it 8407 8408 /* 8409 * Throw an exception indicating that we have not implemented this 8410 * mode of filled-new-array. 8411 */ 8412.LOP_FILLED_NEW_ARRAY_RANGE_notimpl: 8413 ldr r0, .L_strInternalError 8414 ldr r1, .L_strFilledNewArrayNotImpl 8415 bl dvmThrowException 8416 b common_exceptionThrown 8417 8418 .if (!1) @ define in one or the other, not both 8419.L_strFilledNewArrayNotImpl: 8420 .word .LstrFilledNewArrayNotImpl 8421.L_strInternalError: 8422 .word .LstrInternalError 8423 .endif 8424 8425 8426/* continuation for OP_CMPL_FLOAT */ 8427 8428 @ Test for NaN with a second comparison. EABI forbids testing bit 8429 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8430 @ make the library call. 8431.LOP_CMPL_FLOAT_gt_or_nan: 8432 mov r1, r9 @ reverse order 8433 mov r0, r10 8434 bl __aeabi_cfcmple @ r0<- Z set if eq, C clear if < 8435 @bleq common_abort 8436 movcc r1, #1 @ (greater than) r1<- 1 8437 bcc .LOP_CMPL_FLOAT_finish 8438 mvn r1, #0 @ r1<- 1 or -1 for NaN 8439 b .LOP_CMPL_FLOAT_finish 8440 8441 8442#if 0 /* "clasic" form */ 8443 FETCH(r0, 1) @ r0<- CCBB 8444 and r2, r0, #255 @ r2<- BB 8445 mov r3, r0, lsr #8 @ r3<- CC 8446 GET_VREG(r9, r2) @ r9<- vBB 8447 GET_VREG(r10, r3) @ r10<- vCC 8448 mov r0, r9 @ r0<- vBB 8449 mov r1, r10 @ r1<- vCC 8450 bl __aeabi_fcmpeq @ r0<- (vBB == vCC) 8451 cmp r0, #0 @ equal? 
8452 movne r1, #0 @ yes, result is 0 8453 bne OP_CMPL_FLOAT_finish 8454 mov r0, r9 @ r0<- vBB 8455 mov r1, r10 @ r1<- vCC 8456 bl __aeabi_fcmplt @ r0<- (vBB < vCC) 8457 cmp r0, #0 @ less than? 8458 b OP_CMPL_FLOAT_continue 8459@%break 8460 8461OP_CMPL_FLOAT_continue: 8462 mvnne r1, #0 @ yes, result is -1 8463 bne OP_CMPL_FLOAT_finish 8464 mov r0, r9 @ r0<- vBB 8465 mov r1, r10 @ r1<- vCC 8466 bl __aeabi_fcmpgt @ r0<- (vBB > vCC) 8467 cmp r0, #0 @ greater than? 8468 beq OP_CMPL_FLOAT_nan @ no, must be NaN 8469 mov r1, #1 @ yes, result is 1 8470 @ fall through to _finish 8471 8472OP_CMPL_FLOAT_finish: 8473 mov r3, rINST, lsr #8 @ r3<- AA 8474 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8475 SET_VREG(r1, r3) @ vAA<- r1 8476 GET_INST_OPCODE(ip) @ extract opcode from rINST 8477 GOTO_OPCODE(ip) @ jump to next instruction 8478 8479 /* 8480 * This is expected to be uncommon, so we double-branch (once to here, 8481 * again back to _finish). 8482 */ 8483OP_CMPL_FLOAT_nan: 8484 mvn r1, #0 @ r1<- 1 or -1 for NaN 8485 b OP_CMPL_FLOAT_finish 8486 8487#endif 8488 8489 8490/* continuation for OP_CMPG_FLOAT */ 8491 8492 @ Test for NaN with a second comparison. EABI forbids testing bit 8493 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8494 @ make the library call. 8495.LOP_CMPG_FLOAT_gt_or_nan: 8496 mov r1, r9 @ reverse order 8497 mov r0, r10 8498 bl __aeabi_cfcmple @ r0<- Z set if eq, C clear if < 8499 @bleq common_abort 8500 movcc r1, #1 @ (greater than) r1<- 1 8501 bcc .LOP_CMPG_FLOAT_finish 8502 mov r1, #1 @ r1<- 1 or -1 for NaN 8503 b .LOP_CMPG_FLOAT_finish 8504 8505 8506#if 0 /* "clasic" form */ 8507 FETCH(r0, 1) @ r0<- CCBB 8508 and r2, r0, #255 @ r2<- BB 8509 mov r3, r0, lsr #8 @ r3<- CC 8510 GET_VREG(r9, r2) @ r9<- vBB 8511 GET_VREG(r10, r3) @ r10<- vCC 8512 mov r0, r9 @ r0<- vBB 8513 mov r1, r10 @ r1<- vCC 8514 bl __aeabi_fcmpeq @ r0<- (vBB == vCC) 8515 cmp r0, #0 @ equal? 
8516 movne r1, #0 @ yes, result is 0 8517 bne OP_CMPG_FLOAT_finish 8518 mov r0, r9 @ r0<- vBB 8519 mov r1, r10 @ r1<- vCC 8520 bl __aeabi_fcmplt @ r0<- (vBB < vCC) 8521 cmp r0, #0 @ less than? 8522 b OP_CMPG_FLOAT_continue 8523@%break 8524 8525OP_CMPG_FLOAT_continue: 8526 mvnne r1, #0 @ yes, result is -1 8527 bne OP_CMPG_FLOAT_finish 8528 mov r0, r9 @ r0<- vBB 8529 mov r1, r10 @ r1<- vCC 8530 bl __aeabi_fcmpgt @ r0<- (vBB > vCC) 8531 cmp r0, #0 @ greater than? 8532 beq OP_CMPG_FLOAT_nan @ no, must be NaN 8533 mov r1, #1 @ yes, result is 1 8534 @ fall through to _finish 8535 8536OP_CMPG_FLOAT_finish: 8537 mov r3, rINST, lsr #8 @ r3<- AA 8538 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8539 SET_VREG(r1, r3) @ vAA<- r1 8540 GET_INST_OPCODE(ip) @ extract opcode from rINST 8541 GOTO_OPCODE(ip) @ jump to next instruction 8542 8543 /* 8544 * This is expected to be uncommon, so we double-branch (once to here, 8545 * again back to _finish). 8546 */ 8547OP_CMPG_FLOAT_nan: 8548 mov r1, #1 @ r1<- 1 or -1 for NaN 8549 b OP_CMPG_FLOAT_finish 8550 8551#endif 8552 8553 8554/* continuation for OP_CMPL_DOUBLE */ 8555 8556 @ Test for NaN with a second comparison. EABI forbids testing bit 8557 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8558 @ make the library call. 8559.LOP_CMPL_DOUBLE_gt_or_nan: 8560 ldmia r10, {r0-r1} @ reverse order 8561 ldmia r9, {r2-r3} 8562 bl __aeabi_cdcmple @ r0<- Z set if eq, C clear if < 8563 @bleq common_abort 8564 movcc r1, #1 @ (greater than) r1<- 1 8565 bcc .LOP_CMPL_DOUBLE_finish 8566 mvn r1, #0 @ r1<- 1 or -1 for NaN 8567 b .LOP_CMPL_DOUBLE_finish 8568 8569 8570/* continuation for OP_CMPG_DOUBLE */ 8571 8572 @ Test for NaN with a second comparison. EABI forbids testing bit 8573 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8574 @ make the library call. 
8575.LOP_CMPG_DOUBLE_gt_or_nan: 8576 ldmia r10, {r0-r1} @ reverse order 8577 ldmia r9, {r2-r3} 8578 bl __aeabi_cdcmple @ r0<- Z set if eq, C clear if < 8579 @bleq common_abort 8580 movcc r1, #1 @ (greater than) r1<- 1 8581 bcc .LOP_CMPG_DOUBLE_finish 8582 mov r1, #1 @ r1<- 1 or -1 for NaN 8583 b .LOP_CMPG_DOUBLE_finish 8584 8585 8586/* continuation for OP_CMP_LONG */ 8587 8588.LOP_CMP_LONG_less: 8589 mvn r1, #0 @ r1<- -1 8590 @ Want to cond code the next mov so we can avoid branch, but don't see it; 8591 @ instead, we just replicate the tail end. 8592 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8593 SET_VREG(r1, r9) @ vAA<- r1 8594 GET_INST_OPCODE(ip) @ extract opcode from rINST 8595 GOTO_OPCODE(ip) @ jump to next instruction 8596 8597.LOP_CMP_LONG_greater: 8598 mov r1, #1 @ r1<- 1 8599 @ fall through to _finish 8600 8601.LOP_CMP_LONG_finish: 8602 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8603 SET_VREG(r1, r9) @ vAA<- r1 8604 GET_INST_OPCODE(ip) @ extract opcode from rINST 8605 GOTO_OPCODE(ip) @ jump to next instruction 8606 8607 8608/* continuation for OP_AGET_WIDE */ 8609 8610.LOP_AGET_WIDE_finish: 8611 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8612 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8613 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8614 GET_INST_OPCODE(ip) @ extract opcode from rINST 8615 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8616 GOTO_OPCODE(ip) @ jump to next instruction 8617 8618 8619/* continuation for OP_APUT_WIDE */ 8620 8621.LOP_APUT_WIDE_finish: 8622 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8623 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8624 GET_INST_OPCODE(ip) @ extract opcode from rINST 8625 strd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8626 GOTO_OPCODE(ip) @ jump to next instruction 8627 8628 8629/* continuation for OP_APUT_OBJECT */ 8630 /* 8631 * On entry: 8632 * r1 = vBB (arrayObj) 8633 * r9 = vAA (obj) 8634 * r10 = offset into array (vBB + vCC * width) 8635 */ 8636.LOP_APUT_OBJECT_finish: 8637 cmp 
r9, #0 @ storing null reference? 8638 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8639 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8640 ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz 8641 bl dvmCanPutArrayElement @ test object type vs. array type 8642 cmp r0, #0 @ okay? 8643 beq common_errArrayStore @ no 8644.LOP_APUT_OBJECT_skip_check: 8645 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8646 GET_INST_OPCODE(ip) @ extract opcode from rINST 8647 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8648 GOTO_OPCODE(ip) @ jump to next instruction 8649 8650 8651/* continuation for OP_IGET */ 8652 8653 /* 8654 * Currently: 8655 * r0 holds resolved field 8656 * r9 holds object 8657 */ 8658.LOP_IGET_finish: 8659 @bl common_squeak0 8660 cmp r9, #0 @ check object for null 8661 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8662 beq common_errNullObject @ object was null 8663 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8664 mov r2, rINST, lsr #8 @ r2<- A+ 8665 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8666 and r2, r2, #15 @ r2<- A 8667 GET_INST_OPCODE(ip) @ extract opcode from rINST 8668 SET_VREG(r0, r2) @ fp[A]<- r0 8669 GOTO_OPCODE(ip) @ jump to next instruction 8670 8671 8672/* continuation for OP_IGET_WIDE */ 8673 8674 /* 8675 * Currently: 8676 * r0 holds resolved field 8677 * r9 holds object 8678 */ 8679.LOP_IGET_WIDE_finish: 8680 cmp r9, #0 @ check object for null 8681 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8682 beq common_errNullObject @ object was null 8683 mov r2, rINST, lsr #8 @ r2<- A+ 8684 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 8685 and r2, r2, #15 @ r2<- A 8686 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8687 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8688 GET_INST_OPCODE(ip) @ extract opcode from rINST 8689 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8690 GOTO_OPCODE(ip) @ jump to next instruction 8691 8692 8693/* continuation for OP_IGET_OBJECT */ 8694 8695 /* 8696 * 
Currently: 8697 * r0 holds resolved field 8698 * r9 holds object 8699 */ 8700.LOP_IGET_OBJECT_finish: 8701 @bl common_squeak0 8702 cmp r9, #0 @ check object for null 8703 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8704 beq common_errNullObject @ object was null 8705 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8706 mov r2, rINST, lsr #8 @ r2<- A+ 8707 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8708 and r2, r2, #15 @ r2<- A 8709 GET_INST_OPCODE(ip) @ extract opcode from rINST 8710 SET_VREG(r0, r2) @ fp[A]<- r0 8711 GOTO_OPCODE(ip) @ jump to next instruction 8712 8713 8714/* continuation for OP_IGET_BOOLEAN */ 8715 8716 /* 8717 * Currently: 8718 * r0 holds resolved field 8719 * r9 holds object 8720 */ 8721.LOP_IGET_BOOLEAN_finish: 8722 @bl common_squeak1 8723 cmp r9, #0 @ check object for null 8724 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8725 beq common_errNullObject @ object was null 8726 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8727 mov r2, rINST, lsr #8 @ r2<- A+ 8728 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8729 and r2, r2, #15 @ r2<- A 8730 GET_INST_OPCODE(ip) @ extract opcode from rINST 8731 SET_VREG(r0, r2) @ fp[A]<- r0 8732 GOTO_OPCODE(ip) @ jump to next instruction 8733 8734 8735/* continuation for OP_IGET_BYTE */ 8736 8737 /* 8738 * Currently: 8739 * r0 holds resolved field 8740 * r9 holds object 8741 */ 8742.LOP_IGET_BYTE_finish: 8743 @bl common_squeak2 8744 cmp r9, #0 @ check object for null 8745 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8746 beq common_errNullObject @ object was null 8747 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8748 mov r2, rINST, lsr #8 @ r2<- A+ 8749 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8750 and r2, r2, #15 @ r2<- A 8751 GET_INST_OPCODE(ip) @ extract opcode from rINST 8752 SET_VREG(r0, r2) @ fp[A]<- r0 8753 GOTO_OPCODE(ip) @ jump to next instruction 8754 8755 8756/* continuation for OP_IGET_CHAR */ 8757 8758 /* 8759 * 
Currently: 8760 * r0 holds resolved field 8761 * r9 holds object 8762 */ 8763.LOP_IGET_CHAR_finish: 8764 @bl common_squeak3 8765 cmp r9, #0 @ check object for null 8766 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8767 beq common_errNullObject @ object was null 8768 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8769 mov r2, rINST, lsr #8 @ r2<- A+ 8770 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8771 and r2, r2, #15 @ r2<- A 8772 GET_INST_OPCODE(ip) @ extract opcode from rINST 8773 SET_VREG(r0, r2) @ fp[A]<- r0 8774 GOTO_OPCODE(ip) @ jump to next instruction 8775 8776 8777/* continuation for OP_IGET_SHORT */ 8778 8779 /* 8780 * Currently: 8781 * r0 holds resolved field 8782 * r9 holds object 8783 */ 8784.LOP_IGET_SHORT_finish: 8785 @bl common_squeak4 8786 cmp r9, #0 @ check object for null 8787 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8788 beq common_errNullObject @ object was null 8789 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8790 mov r2, rINST, lsr #8 @ r2<- A+ 8791 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8792 and r2, r2, #15 @ r2<- A 8793 GET_INST_OPCODE(ip) @ extract opcode from rINST 8794 SET_VREG(r0, r2) @ fp[A]<- r0 8795 GOTO_OPCODE(ip) @ jump to next instruction 8796 8797 8798/* continuation for OP_IPUT */ 8799 8800 /* 8801 * Currently: 8802 * r0 holds resolved field 8803 * r9 holds object 8804 */ 8805.LOP_IPUT_finish: 8806 @bl common_squeak0 8807 mov r1, rINST, lsr #8 @ r1<- A+ 8808 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8809 and r1, r1, #15 @ r1<- A 8810 cmp r9, #0 @ check object for null 8811 GET_VREG(r0, r1) @ r0<- fp[A] 8812 beq common_errNullObject @ object was null 8813 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8814 GET_INST_OPCODE(ip) @ extract opcode from rINST 8815 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8816 GOTO_OPCODE(ip) @ jump to next instruction 8817 8818 8819/* continuation for OP_IPUT_WIDE */ 8820 8821 /* 8822 * Currently: 8823 
* r0 holds resolved field 8824 * r9 holds object 8825 */ 8826.LOP_IPUT_WIDE_finish: 8827 mov r2, rINST, lsr #8 @ r2<- A+ 8828 cmp r9, #0 @ check object for null 8829 and r2, r2, #15 @ r2<- A 8830 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8831 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8832 beq common_errNullObject @ object was null 8833 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8834 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8835 GET_INST_OPCODE(ip) @ extract opcode from rINST 8836 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0 8837 GOTO_OPCODE(ip) @ jump to next instruction 8838 8839 8840/* continuation for OP_IPUT_OBJECT */ 8841 8842 /* 8843 * Currently: 8844 * r0 holds resolved field 8845 * r9 holds object 8846 */ 8847.LOP_IPUT_OBJECT_finish: 8848 @bl common_squeak0 8849 mov r1, rINST, lsr #8 @ r1<- A+ 8850 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8851 and r1, r1, #15 @ r1<- A 8852 cmp r9, #0 @ check object for null 8853 GET_VREG(r0, r1) @ r0<- fp[A] 8854 beq common_errNullObject @ object was null 8855 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8856 GET_INST_OPCODE(ip) @ extract opcode from rINST 8857 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8858 GOTO_OPCODE(ip) @ jump to next instruction 8859 8860 8861/* continuation for OP_IPUT_BOOLEAN */ 8862 8863 /* 8864 * Currently: 8865 * r0 holds resolved field 8866 * r9 holds object 8867 */ 8868.LOP_IPUT_BOOLEAN_finish: 8869 @bl common_squeak1 8870 mov r1, rINST, lsr #8 @ r1<- A+ 8871 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8872 and r1, r1, #15 @ r1<- A 8873 cmp r9, #0 @ check object for null 8874 GET_VREG(r0, r1) @ r0<- fp[A] 8875 beq common_errNullObject @ object was null 8876 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8877 GET_INST_OPCODE(ip) @ extract opcode from rINST 8878 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8879 GOTO_OPCODE(ip) @ jump to next instruction 8880 8881 8882/* continuation for OP_IPUT_BYTE */ 8883 
8884 /* 8885 * Currently: 8886 * r0 holds resolved field 8887 * r9 holds object 8888 */ 8889.LOP_IPUT_BYTE_finish: 8890 @bl common_squeak2 8891 mov r1, rINST, lsr #8 @ r1<- A+ 8892 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8893 and r1, r1, #15 @ r1<- A 8894 cmp r9, #0 @ check object for null 8895 GET_VREG(r0, r1) @ r0<- fp[A] 8896 beq common_errNullObject @ object was null 8897 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8898 GET_INST_OPCODE(ip) @ extract opcode from rINST 8899 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8900 GOTO_OPCODE(ip) @ jump to next instruction 8901 8902 8903/* continuation for OP_IPUT_CHAR */ 8904 8905 /* 8906 * Currently: 8907 * r0 holds resolved field 8908 * r9 holds object 8909 */ 8910.LOP_IPUT_CHAR_finish: 8911 @bl common_squeak3 8912 mov r1, rINST, lsr #8 @ r1<- A+ 8913 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8914 and r1, r1, #15 @ r1<- A 8915 cmp r9, #0 @ check object for null 8916 GET_VREG(r0, r1) @ r0<- fp[A] 8917 beq common_errNullObject @ object was null 8918 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8919 GET_INST_OPCODE(ip) @ extract opcode from rINST 8920 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8921 GOTO_OPCODE(ip) @ jump to next instruction 8922 8923 8924/* continuation for OP_IPUT_SHORT */ 8925 8926 /* 8927 * Currently: 8928 * r0 holds resolved field 8929 * r9 holds object 8930 */ 8931.LOP_IPUT_SHORT_finish: 8932 @bl common_squeak4 8933 mov r1, rINST, lsr #8 @ r1<- A+ 8934 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8935 and r1, r1, #15 @ r1<- A 8936 cmp r9, #0 @ check object for null 8937 GET_VREG(r0, r1) @ r0<- fp[A] 8938 beq common_errNullObject @ object was null 8939 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8940 GET_INST_OPCODE(ip) @ extract opcode from rINST 8941 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8942 GOTO_OPCODE(ip) @ jump to next instruction 8943 8944 8945/* continuation for OP_SGET */ 8946 8947 /* 
8948 * Continuation if the field has not yet been resolved. 8949 * r1: BBBB field ref 8950 */ 8951.LOP_SGET_resolve: 8952 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8953 EXPORT_PC() @ resolve() could throw, so export now 8954 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8955 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8956 cmp r0, #0 @ success? 8957 bne .LOP_SGET_finish @ yes, finish 8958 b common_exceptionThrown @ no, handle exception 8959 8960 8961/* continuation for OP_SGET_WIDE */ 8962 8963 /* 8964 * Continuation if the field has not yet been resolved. 8965 * r1: BBBB field ref 8966 */ 8967.LOP_SGET_WIDE_resolve: 8968 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8969 EXPORT_PC() @ resolve() could throw, so export now 8970 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8971 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8972 cmp r0, #0 @ success? 8973 bne .LOP_SGET_WIDE_finish @ yes, finish 8974 b common_exceptionThrown @ no, handle exception 8975 8976 8977/* continuation for OP_SGET_OBJECT */ 8978 8979 /* 8980 * Continuation if the field has not yet been resolved. 8981 * r1: BBBB field ref 8982 */ 8983.LOP_SGET_OBJECT_resolve: 8984 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8985 EXPORT_PC() @ resolve() could throw, so export now 8986 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8987 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8988 cmp r0, #0 @ success? 8989 bne .LOP_SGET_OBJECT_finish @ yes, finish 8990 b common_exceptionThrown @ no, handle exception 8991 8992 8993/* continuation for OP_SGET_BOOLEAN */ 8994 8995 /* 8996 * Continuation if the field has not yet been resolved. 
8997 * r1: BBBB field ref 8998 */ 8999.LOP_SGET_BOOLEAN_resolve: 9000 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9001 EXPORT_PC() @ resolve() could throw, so export now 9002 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9003 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9004 cmp r0, #0 @ success? 9005 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 9006 b common_exceptionThrown @ no, handle exception 9007 9008 9009/* continuation for OP_SGET_BYTE */ 9010 9011 /* 9012 * Continuation if the field has not yet been resolved. 9013 * r1: BBBB field ref 9014 */ 9015.LOP_SGET_BYTE_resolve: 9016 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9017 EXPORT_PC() @ resolve() could throw, so export now 9018 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9019 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9020 cmp r0, #0 @ success? 9021 bne .LOP_SGET_BYTE_finish @ yes, finish 9022 b common_exceptionThrown @ no, handle exception 9023 9024 9025/* continuation for OP_SGET_CHAR */ 9026 9027 /* 9028 * Continuation if the field has not yet been resolved. 9029 * r1: BBBB field ref 9030 */ 9031.LOP_SGET_CHAR_resolve: 9032 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9033 EXPORT_PC() @ resolve() could throw, so export now 9034 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9035 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9036 cmp r0, #0 @ success? 9037 bne .LOP_SGET_CHAR_finish @ yes, finish 9038 b common_exceptionThrown @ no, handle exception 9039 9040 9041/* continuation for OP_SGET_SHORT */ 9042 9043 /* 9044 * Continuation if the field has not yet been resolved. 9045 * r1: BBBB field ref 9046 */ 9047.LOP_SGET_SHORT_resolve: 9048 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9049 EXPORT_PC() @ resolve() could throw, so export now 9050 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9051 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9052 cmp r0, #0 @ success? 
    bne     .LOP_SGET_SHORT_finish      @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_finish            @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_WIDE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     *  r9: &fp[AA]
     */
.LOP_SPUT_WIDE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_WIDE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_OBJECT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_OBJECT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_OBJECT_finish     @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_BOOLEAN */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_BOOLEAN_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_BOOLEAN_finish    @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_BYTE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_BYTE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_BYTE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_CHAR */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_CHAR_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_CHAR_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_SHORT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_SHORT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_SHORT_finish      @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_INVOKE_VIRTUAL */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
     */
.LOP_INVOKE_VIRTUAL_continue:
    GET_VREG(r1, r10)                   @ r1<- "this" ptr
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    cmp     r1, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodNoRange  @ continue on


/* continuation for OP_INVOKE_SUPER */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r9 = method->clazz
     */
.LOP_INVOKE_SUPER_continue:
    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_nsm       @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodNoRange  @ continue on

.LOP_INVOKE_SUPER_resolve:
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_continue  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     *  r0 = resolved base method
     */
.LOP_INVOKE_SUPER_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod


/* continuation for OP_INVOKE_DIRECT */

    /*
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_finish   @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* continuation for OP_INVOKE_VIRTUAL_RANGE */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
     */
.LOP_INVOKE_VIRTUAL_RANGE_continue:
    GET_VREG(r1, r10)                   @ r1<- "this" ptr
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    cmp     r1, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange    @ continue on


/* continuation for OP_INVOKE_SUPER_RANGE */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r9 = method->clazz
     */
.LOP_INVOKE_SUPER_RANGE_continue:
    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange    @ continue on

.LOP_INVOKE_SUPER_RANGE_resolve:
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_RANGE_continue    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     *  r0 = resolved base method
     */
.LOP_INVOKE_SUPER_RANGE_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod


/* continuation for OP_INVOKE_DIRECT_RANGE */

    /*
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_RANGE_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* continuation for OP_FLOAT_TO_LONG */
/*
 * Convert the float in r0 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
f2l_doconv:
    stmfd   sp!, {r4, lr}
    mov     r1, #0x5f000000             @ (float)maxlong
    mov     r4, r0                      @ preserve arg across the helper calls
    bl      __aeabi_fcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
    mvnne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}               @ conditional pop+return if clamped

    mov     r0, r4                      @ recover arg
    mov     r1, #0xdf000000             @ (float)minlong
    bl      __aeabi_fcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (8000000000000000)
    movne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}               @ conditional pop+return if clamped

    mov     r0, r4                      @ recover arg
    mov     r1, r4
    bl      __aeabi_fcmpeq              @ is arg == self?  (false only for NaN)
    cmp     r0, #0                      @ zero == no
    moveq   r1, #0                      @ return zero for NaN
    ldmeqfd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    bl      __aeabi_f2lz                @ convert float to long
    ldmfd   sp!, {r4, pc}


/* continuation for OP_DOUBLE_TO_LONG */
/*
 * Convert the double in r0/r1 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
d2l_doconv:
    stmfd   sp!, {r4, r5, lr}           @ save regs
    mov     r3, #0x43000000             @ maxlong, as a double (high word)
    add     r3, #0x00e00000             @  0x43e00000
    mov     r2, #0                      @ maxlong, as a double (low word)
    sub     sp, sp, #4                  @ align for EABI
    mov     r4, r0                      @ save a copy of r0
    mov     r5, r1                      @  and r1
    bl      __aeabi_dcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
    mvnne   r1, #0x80000000
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    mov     r3, #0xc3000000             @ minlong, as a double (high word)
    add     r3, #0x00e00000             @  0xc3e00000
    mov     r2, #0                      @ minlong, as a double (low word)
    bl      __aeabi_dcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (8000000000000000)
    movne   r1, #0x80000000
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    mov     r2, r4                      @ compare against self
    mov     r3, r5
    bl      __aeabi_dcmpeq              @ is arg == self?  (false only for NaN)
    cmp     r0, #0                      @ zero == no
    moveq   r1, #0                      @ return zero for NaN
    beq     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    bl      __aeabi_d2lz                @ convert double to long

1:
    add     sp, sp, #4                  @ undo EABI alignment padding
    ldmfd   sp!, {r4, r5, pc}


/* continuation for OP_MUL_LONG */

.LOP_MUL_LONG_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHL_LONG */

.LOP_SHL_LONG_finish:
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHR_LONG */

.LOP_SHR_LONG_finish:
    mov     r1, r1, asr r2              @ r1<- r1 >> r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_USHR_LONG */

.LOP_USHR_LONG_finish:
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHL_LONG_2ADDR */

.LOP_SHL_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHR_LONG_2ADDR */

.LOP_SHR_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_USHR_LONG_2ADDR */

.LOP_USHR_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_EXECUTE_INLINE */

    /*
     * Extract args, call function.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     *
     * Other ideas:
     * - Use a jump table from the main piece to jump directly into the
     *   AND/LDR pairs.  Costs a data load, saves a branch.
     * - Have five separate pieces that do the loading, so we can work the
     *   interleave a little better.  Increases code size.
     */
.LOP_EXECUTE_INLINE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- FEDC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  and     ip, r9, #0xf000             @ isolate F
    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vF (shift right 12, left 2)
3:  and     ip, r9, #0x0f00             @ isolate E
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vE
2:  and     ip, r9, #0x00f0             @ isolate D
    ldr     r1, [rFP, ip, lsr #2]       @ r1<- vD
1:  and     ip, r9, #0x000f             @ isolate C
    ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
0:
    ldr     r9, .LOP_EXECUTE_INLINE_table       @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_table:
    .word   gDvmInlineOpsTable


/* continuation for OP_EXECUTE_INLINE_RANGE */

    /*
     * Extract args, call function.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     */
.LOP_EXECUTE_INLINE_RANGE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- CCCC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  add     ip, r9, #3                  @ base+3
    GET_VREG(r3, ip)                    @ r3<- vBase[3]
3:  add     ip, r9, #2                  @ base+2
    GET_VREG(r2, ip)                    @ r2<- vBase[2]
2:  add     ip, r9, #1                  @ base+1
    GET_VREG(r1, ip)                    @ r1<- vBase[1]
1:  add     ip, r9, #0                  @ (nop)
    GET_VREG(r0, ip)                    @ r0<- vBase[0]
0:
    ldr     r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_RANGE_table:
    .word   gDvmInlineOpsTable


    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
    .global dvmAsmSisterEnd
dvmAsmSisterEnd:

/* File: armv5te/footer.S */

/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */



    .text
    .align  2

#if defined(WITH_JIT)
#if defined(WITH_SELF_VERIFICATION)
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResumeNPC]
    str    r1,[rGLUE,#offGlue_jitResumeDPC]
    mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSTraceSelectNoChain   @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpBackwardBranch
dvmJitToInterpBackwardBranch:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSNormal               @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
#else
/*
 * Return from the translation cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction.
 * We have to skip
 * the code cache lookup otherwise it is possible to indefinitely bounce
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    rPC, r0                      @ r0 carried the Dalvik PC to resume at
#ifdef JIT_STATS
    mov    r0,lr
    bl     dvmBumpPunt;
#endif
    EXPORT_PC()
    mov    r0, #0
    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *    r0 <= PC
 *    r1 <= PC of resume instruction
 *    lr <= resume point in translation
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResumeNPC]
    str    r1,[rGLUE,#offGlue_jitResumeDPC]
    mov    r1,#kInterpEntryInstr
    @ enum is 4 byte in aapcs-EABI
    str    r1, [rGLUE, #offGlue_entryPoint]
    mov    rPC,r0
    EXPORT_PC()

    adrl   rIBASE, dvmAsmInstructionStart
    mov    r2,#kJitSingleStep           @ Ask for single step and then revert
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r1,#1                        @ set changeInterp to bail to debug interp
    b      common_gotoBail

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used for callees.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
#ifdef JIT_STATS
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0
    bxne   r0                           @ continue native execution if so
    b      2f                           @ no translation: fall into request path

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.
 */
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    rPC,[lr, #-1]                @ get our target PC
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5                 @ save start of chain branch
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    2f
    mov    r1,rINST
    bl     dvmJitChain                  @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                        @ successful chain?
    bxne   r0                           @ continue native execution
    b      toInterpreter                @ didn't chain - resume with interpreter

/* No translation, so request one if profiling isn't disabled*/
2:
    adrl   rIBASE, dvmAsmInstructionStart
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp    r0, #0
    movne  r2,#kJitTSelectRequestHot    @ ask for trace selection
    bne    common_selectTrace
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return from the translation cache to the interpreter.
 * The return was done with a BLX from thumb mode, and
 * the following 32-bit word contains the target rPC value.
 * Note that lr (r14) will have its low-order bit set to denote
 * its thumb-mode origin.
 *
 * We'll need to stash our lr origin away, recover the new
 * target and then check to see if there is a translation available
 * for our new target.
 * If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    rPC,[lr, #-1]                @ get our target PC
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5                 @ save start of chain branch
#ifdef JIT_STATS
    bl     dvmBumpNormal
#endif
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    toInterpreter                @ go if not, otherwise do chain
    mov    r1,rINST
    bl     dvmJitChain                  @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                        @ successful chain?
    bxne   r0                           @ continue native execution
    b      toInterpreter                @ didn't chain - resume with interpreter

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
#ifdef JIT_STATS
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0
    bxne   r0                           @ continue native execution if so
#endif

/*
 * No translation, restore interpreter regs and start interpreting.
 * rGLUE & rFP were preserved in the translated code, and rPC has
 * already been restored by the time we get here.  We'll need to set
 * up rIBASE & rINST, and load the address of the JitTable into r0.
 */
toInterpreter:
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_JIT_PROF_TABLE(r0)
    @ NOTE: intended fallthrough
/*
 * Common code to update potential trace start counter, and initiate
 * a trace-build if appropriate.  On entry, rPC should point to the
 * next instruction to execute, and rINST should be already loaded with
 * the next opcode word, and r0 holds a pointer to the jit profile
 * table (pJitProfTable).
 */
common_testUpdateProfile:
    cmp    r0,#0
    GET_INST_OPCODE(ip)
    GOTO_OPCODE_IFEQ(ip)                @ if not profiling, fallthrough otherwise */

common_updateProfile:
    eor    r3,rPC,rPC,lsr #12           @ cheap, but fast hash function
    lsl    r3,r3,#(32 - JIT_PROF_SIZE_LOG_2)            @ shift out excess bits
    ldrb   r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)]   @ get counter
    GET_INST_OPCODE(ip)
    subs   r1,r1,#1                     @ decrement counter
    strb   r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)]   @ and store it
    GOTO_OPCODE_IFNE(ip)                @ if not threshold, fallthrough otherwise */

/*
 * Here, we switch to the debug interpreter to request
 * trace selection.  First, though, check to see if there
 * is already a native translation in place (and, if so,
 * jump to it now).
 */
    GET_JIT_THRESHOLD(r1)
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    strb   r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)]   @ reset counter
    EXPORT_PC()
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ r0<- dvmJitGetCodeAddr(rPC)
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0
#if !defined(WITH_SELF_VERIFICATION)
    bxne   r0                           @ jump to the translation
    mov    r2,#kJitTSelectRequest       @ ask for trace selection
    @ fall-through to common_selectTrace
#else
    moveq  r2,#kJitTSelectRequest       @ ask for trace selection
    beq    common_selectTrace
    /*
     * At this point, we have a target translation.  However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    mov    r10, r0                      @ save target
    bl     dvmCompilerGetInterpretTemplate
    cmp    r0, r10                      @ special case?
    bne    jitSVShadowRunStart          @ set up self verification shadow space
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
    /* no return */
#endif

/*
 * On entry:
 *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
 */
common_selectTrace:
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r2,#kInterpEntryInstr        @ normal entry reason
    str    r2,[rGLUE,#offGlue_entryPoint]
    mov    r1,#1                        @ set changeInterp
    b      common_gotoBail

#if defined(WITH_SELF_VERIFICATION)
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
 * On entry:
 *    rPC, rFP, rGLUE: the values that they should contain
 *    r10: the address of the target translation.
 */
jitSVShadowRunStart:
    mov    r0,rPC                       @ r0<- program counter
    mov    r1,rFP                       @ r1<- frame pointer
    mov    r2,rGLUE                     @ r2<- InterpState pointer
    mov    r3,r10                       @ r3<- target translation
    bl     dvmSelfVerificationSaveState @ save registers to shadow space
    ldr    rFP,[r0,#offShadowSpace_shadowFP]    @ rFP<- fp in shadow space
    add    rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
    bx     r10                          @ jump to the translation

/*
 * Restore PC, registers, and interpState to original values
 * before jumping back to the interpreter.
 */
jitSVShadowRunEnd:
    mov    r1,rFP                       @ pass ending fp
    bl     dvmSelfVerificationRestoreState  @ restore pc and fp values
    ldr    rPC,[r0,#offShadowSpace_startPC]     @ restore PC
    ldr    rFP,[r0,#offShadowSpace_fp]  @ restore FP
    ldr    rGLUE,[r0,#offShadowSpace_glue]  @ restore InterpState
    ldr    r1,[r0,#offShadowSpace_svState]  @ get self verification state
    cmp    r1,#0                        @ check for punt condition
    beq    1f
    mov    r2,#kJitSelfVerification     @ ask for self verification
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r2,#kInterpEntryInstr        @ normal entry reason
    str    r2,[rGLUE,#offGlue_entryPoint]
    mov    r1,#1                        @ set changeInterp
    b      common_gotoBail

1:                                      @ exit to interpreter without check
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#endif

#endif

/*
 * Common code when a backward branch is taken.
 *
 * On entry:
 *    r9 is PC adjustment *in bytes*
 */
common_backwardBranch:
    mov    r0, #kInterpEntryInstr
    bl     common_periodicChecks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp    r0,#0
    bne    common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/*
 * Need to see if the thread needs to be suspended or debugger/profiler
 * activity has begun.
 *
 * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't
 * have to do the second ldr.
 *
 * TODO: reduce this so we're just checking a single location.
 *
 * On entry:
 *    r0 is reentry type, e.g. kInterpEntryInstr
 *    r9 is trampoline PC adjustment *in bytes*
 */
common_periodicChecks:
    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount

    @ speculatively store r0 before it is clobbered by dvmCheckSuspendPending
    str     r0, [rGLUE, #offGlue_entryPoint]

#if defined(WITH_DEBUGGER)
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
#endif
#if defined(WITH_PROFILER)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
#endif

    ldr     r3, [r3]                    @ r3<- suspendCount (int)

#if defined(WITH_DEBUGGER)
    ldrb    r1, [r1]                    @ r1<- debuggerActive (boolean)
#endif
#if defined (WITH_PROFILER)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
#endif

    cmp     r3, #0                      @ suspend pending?
    bne     2f                          @ yes, do full suspension check

#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
    orrs    r1, r1, r2                  @ r1<- r1 | r2
    cmp     r1, #0                      @ debugger attached or profiler started?
# elif defined(WITH_DEBUGGER)
    cmp     r1, #0                      @ debugger attached?
# elif defined(WITH_PROFILER)
    cmp     r2, #0                      @ profiler started?
# endif
    bne     3f                          @ debugger/profiler, switch interp
#endif

    bx      lr                          @ nothing to do, return

2:  @ check suspend
#if defined(WITH_JIT)
    /*
     * Refresh the Jit's cached copy of profile table pointer.  This pointer
     * doubles as the Jit's on/off switch.
     */
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable]    @ r3<-&gDvmJit.pJitProfTable
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r3, [r3]                    @ r3 <- pJitProfTable
    EXPORT_PC()                         @ need for precise GC
    str     r3, [rGLUE, #offGlue_pJitProfTable]     @ refresh Jit's on/off switch
#else
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ need for precise GC
#endif
    b       dvmCheckSuspendPending      @ suspend if necessary, then return

3:  @ debugger/profiler enabled, bail out
    add     rPC, rPC, r9                @ update rPC
    mov     r1, #1                      @ "want switch" = true
    b       common_gotoBail


/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 *
 * State registers will be saved to the "glue" area before bailing.
 *
 * On entry:
 *   r1 is "bool changeInterp", indicating if we want to switch to the
 *     other interpreter or just bail all the way out
 */
common_gotoBail:
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r0, rGLUE                   @ r0<- glue ptr
    b       dvmMterpStdBail             @ call(glue, changeInterp)

    @add     r1, r1, #1                 @ using (boolean+1)
    @add     r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf
    @bl      _longjmp                   @ does not return
    @bl      common_abort


/*
 * Common code for method invocation with range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodRange:
.LinvokeNewRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC

    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
    @ (very few methods have > 10 args; could unroll for common cases)
    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
1:  ldr     r1, [r3], #4                @ val = *fp++
    subs    r2, r2, #1                  @ count--
    str     r1, [r10], #4               @ *outs++ = val
    bne     1b                          @ ...while count != 0
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    b       .LinvokeArgsDone

/*
 * Common code for method invocation without range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    beq     .LinvokeArgsDone

    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
.LinvokeNonRange:
    rsb     r2, r2, #5                  @ r2<- 5-r2
    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
5:  and     ip, rINST, #0x0f00          @ isolate A
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vA (shift right 8, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vA
4:  and     ip, r1, #0xf000             @ isolate G
    ldr     r2, [rFP, ip, lsr #10]      @ r2<- vG (shift right 12, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vG
3:  and     ip, r1, #0x0f00             @ isolate F
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vF
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vF
2:  and     ip, r1, #0x00f0             @ isolate E
    ldr     r2, [rFP, ip, lsr #2]       @ r2<- vE
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vE
1:  and     ip, r1, #0x000f             @ isolate D
    ldr     r2, [rFP, ip, lsl #2]       @ r2<- vD
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]   @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
@    bl      common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                      @ bottom < interpStackEnd?
    ldr     r3, [r0, #offMethod_accessFlags]    @ r3<- methodToCall->accessFlags
    blt     .LstackOverflow             @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE
    bne     .LinvokeNative

    /*
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                    @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex]    @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                     @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]  @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]    @ glue->method
= methodToCall 10126 str r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ... 10127#if defined(WITH_JIT) 10128 GET_JIT_PROF_TABLE(r0) 10129 mov rFP, r1 @ fp = newFp 10130 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 10131 mov rINST, r9 @ publish new rINST 10132 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 10133 cmp r0,#0 10134 bne common_updateProfile 10135 GOTO_OPCODE(ip) @ jump to next instruction 10136#else 10137 mov rFP, r1 @ fp = newFp 10138 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 10139 mov rINST, r9 @ publish new rINST 10140 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 10141 GOTO_OPCODE(ip) @ jump to next instruction 10142#endif 10143 10144.LinvokeNative: 10145 @ Prep for the native call 10146 @ r0=methodToCall, r1=newFp, r10=newSaveArea 10147 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10148 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->... 10149 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp 10150 str r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top 10151 mov r9, r3 @ r9<- glue->self (preserve) 10152 10153 mov r2, r0 @ r2<- methodToCall 10154 mov r0, r1 @ r0<- newFp (points to args) 10155 add r1, rGLUE, #offGlue_retval @ r1<- &retval 10156 10157#ifdef ASSIST_DEBUGGER 10158 /* insert fake function header to help gdb find the stack frame */ 10159 b .Lskip 10160 .type dalvik_mterp, %function 10161dalvik_mterp: 10162 .fnstart 10163 MTERP_ENTRY1 10164 MTERP_ENTRY2 10165.Lskip: 10166#endif 10167 10168 @mov lr, pc @ set return addr 10169 @ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc 10170 LDR_PC_LR "[r2, #offMethod_nativeFunc]" 10171 10172#if defined(WITH_JIT) 10173 ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status 10174#endif 10175 10176 @ native return; r9=self, r10=newSaveArea 10177 @ equivalent to dvmPopJniLocals 10178 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ 
r0<- saved top 10179 ldr r1, [r9, #offThread_exception] @ check for exception 10180#if defined(WITH_JIT) 10181 ldr r3, [r3] @ r3 <- gDvmJit.pProfTable 10182#endif 10183 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp 10184 cmp r1, #0 @ null? 10185 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top 10186#if defined(WITH_JIT) 10187 str r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch 10188#endif 10189 bne common_exceptionThrown @ no, handle exception 10190 10191 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 10192 GET_INST_OPCODE(ip) @ extract opcode from rINST 10193 GOTO_OPCODE(ip) @ jump to next instruction 10194 10195.LstackOverflow: @ r0=methodToCall 10196 mov r1, r0 @ r1<- methodToCall 10197 ldr r0, [rGLUE, #offGlue_self] @ r0<- self 10198 bl dvmHandleStackOverflow 10199 b common_exceptionThrown 10200#ifdef ASSIST_DEBUGGER 10201 .fnend 10202#endif 10203 10204 10205 /* 10206 * Common code for method invocation, calling through "glue code". 10207 * 10208 * TODO: now that we have range and non-range invoke handlers, this 10209 * needs to be split into two. Maybe just create entry points 10210 * that set r9 and jump here? 10211 * 10212 * On entry: 10213 * r0 is "Method* methodToCall", the method we're trying to call 10214 * r9 is "bool methodCallRange", indicating if this is a /range variant 10215 */ 10216 .if 0 10217.LinvokeOld: 10218 sub sp, sp, #8 @ space for args + pad 10219 FETCH(ip, 2) @ ip<- FEDC or CCCC 10220 mov r2, r0 @ A2<- methodToCall 10221 mov r0, rGLUE @ A0<- glue 10222 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 10223 mov r1, r9 @ A1<- methodCallRange 10224 mov r3, rINST, lsr #8 @ A3<- AA 10225 str ip, [sp, #0] @ A4<- ip 10226 bl dvmMterp_invokeMethod @ call the C invokeMethod 10227 add sp, sp, #8 @ remove arg area 10228 b common_resumeAfterGlueCall @ continue to next instruction 10229 .endif 10230 10231 10232 10233/* 10234 * Common code for handling a return instruction. 
10235 * 10236 * This does not return. 10237 */ 10238common_returnFromMethod: 10239.LreturnNew: 10240 mov r0, #kInterpEntryReturn 10241 mov r9, #0 10242 bl common_periodicChecks 10243 10244 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) 10245 ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame 10246 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc 10247 ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)] 10248 @ r2<- method we're returning to 10249 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 10250 cmp r2, #0 @ is this a break frame? 10251 ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz 10252 mov r1, #0 @ "want switch" = false 10253 beq common_gotoBail @ break frame, bail out completely 10254 10255 PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST 10256 str r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method 10257 ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex 10258 str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp 10259#if defined(WITH_JIT) 10260 ldr r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr 10261 GET_JIT_PROF_TABLE(r0) 10262 mov rPC, r9 @ publish new rPC 10263 str r1, [rGLUE, #offGlue_methodClassDex] 10264 str r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land 10265 cmp r10, #0 @ caller is compiled code 10266 blxne r10 10267 GET_INST_OPCODE(ip) @ extract opcode from rINST 10268 cmp r0,#0 10269 bne common_updateProfile 10270 GOTO_OPCODE(ip) @ jump to next instruction 10271#else 10272 GET_INST_OPCODE(ip) @ extract opcode from rINST 10273 mov rPC, r9 @ publish new rPC 10274 str r1, [rGLUE, #offGlue_methodClassDex] 10275 GOTO_OPCODE(ip) @ jump to next instruction 10276#endif 10277 10278 /* 10279 * Return handling, calls through "glue code". 
     */
    .if     0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif


/*
 * Somebody has thrown an exception.  Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
    .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow
    mov     r9, #0
    bl      common_periodicChecks

    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10                     @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    mov     r3, #0                      @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false ("scan only" flag)
    str     ip, [sp, #-4]!              @ *--sp = &fp (5th arg, on stack)
    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
    mov     r0, r10                     @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ save relPc result in rFP
    mov     r0, r10                     @ r0<- self
    mov     r1, r9                      @ r1<- exception
    bl      dvmCleanupStackOverflow     @ call(self, exception)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack (pop "&fp" and "fp")
    bmi     .LnotCaughtLocally

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]  @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]  @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1         @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, r10                     @ if yes: r0<- self
    movne   r1, r9                      @ if yes: r1<- exception
    blne    dvmCleanupStackOverflow     @ if yes: call(self, exception)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1                  @ offset in code units
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!              @ push line number (varargs)
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!              @ push source file name (varargs)
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
#endif
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    mov     r1, #0                      @ "want switch" = false
    b       common_gotoBail             @ bail out


    /*
     * Exception handling, calls through "glue code".
     */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Invalid array index.
10434 */ 10435common_errArrayIndex: 10436 EXPORT_PC() 10437 ldr r0, strArrayIndexException 10438 mov r1, #0 10439 bl dvmThrowException 10440 b common_exceptionThrown 10441 10442/* 10443 * Invalid array value. 10444 */ 10445common_errArrayStore: 10446 EXPORT_PC() 10447 ldr r0, strArrayStoreException 10448 mov r1, #0 10449 bl dvmThrowException 10450 b common_exceptionThrown 10451 10452/* 10453 * Integer divide or mod by zero. 10454 */ 10455common_errDivideByZero: 10456 EXPORT_PC() 10457 ldr r0, strArithmeticException 10458 ldr r1, strDivideByZero 10459 bl dvmThrowException 10460 b common_exceptionThrown 10461 10462/* 10463 * Attempt to allocate an array with a negative size. 10464 */ 10465common_errNegativeArraySize: 10466 EXPORT_PC() 10467 ldr r0, strNegativeArraySizeException 10468 mov r1, #0 10469 bl dvmThrowException 10470 b common_exceptionThrown 10471 10472/* 10473 * Invocation of a non-existent method. 10474 */ 10475common_errNoSuchMethod: 10476 EXPORT_PC() 10477 ldr r0, strNoSuchMethodError 10478 mov r1, #0 10479 bl dvmThrowException 10480 b common_exceptionThrown 10481 10482/* 10483 * We encountered a null object when we weren't expecting one. We 10484 * export the PC, throw a NullPointerException, and goto the exception 10485 * processing code. 10486 */ 10487common_errNullObject: 10488 EXPORT_PC() 10489 ldr r0, strNullPointerException 10490 mov r1, #0 10491 bl dvmThrowException 10492 b common_exceptionThrown 10493 10494/* 10495 * For debugging, cause an immediate fault. The source address will 10496 * be in lr (use a bl instruction to jump here). 10497 */ 10498common_abort: 10499 ldr pc, .LdeadFood 10500.LdeadFood: 10501 .word 0xdeadf00d 10502 10503/* 10504 * Spit out a "we were here", preserving all registers. (The attempt 10505 * to save ip won't work, but we need to save an even number of 10506 * registers for EABI 64-bit stack alignment.) 
10507 */ 10508 .macro SQUEAK num 10509common_squeak\num: 10510 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10511 ldr r0, strSqueak 10512 mov r1, #\num 10513 bl printf 10514 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10515 bx lr 10516 .endm 10517 10518 SQUEAK 0 10519 SQUEAK 1 10520 SQUEAK 2 10521 SQUEAK 3 10522 SQUEAK 4 10523 SQUEAK 5 10524 10525/* 10526 * Spit out the number in r0, preserving registers. 10527 */ 10528common_printNum: 10529 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10530 mov r1, r0 10531 ldr r0, strSqueak 10532 bl printf 10533 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10534 bx lr 10535 10536/* 10537 * Print a newline, preserving registers. 10538 */ 10539common_printNewline: 10540 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10541 ldr r0, strNewline 10542 bl printf 10543 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10544 bx lr 10545 10546 /* 10547 * Print the 32-bit quantity in r0 as a hex value, preserving registers. 10548 */ 10549common_printHex: 10550 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10551 mov r1, r0 10552 ldr r0, strPrintHex 10553 bl printf 10554 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10555 bx lr 10556 10557/* 10558 * Print the 64-bit quantity in r0-r1, preserving registers. 10559 */ 10560common_printLong: 10561 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10562 mov r3, r1 10563 mov r2, r0 10564 ldr r0, strPrintLong 10565 bl printf 10566 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10567 bx lr 10568 10569/* 10570 * Print full method info. Pass the Method* in r0. Preserves regs. 10571 */ 10572common_printMethod: 10573 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10574 bl dvmMterpPrintMethod 10575 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10576 bx lr 10577 10578/* 10579 * Call a C helper function that dumps regs and possibly some 10580 * additional info. Requires the C function to be compiled in. 
10581 */ 10582 .if 0 10583common_dumpRegs: 10584 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10585 bl dvmMterpDumpArmRegs 10586 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10587 bx lr 10588 .endif 10589 10590#if 0 10591/* 10592 * Experiment on VFP mode. 10593 * 10594 * uint32_t setFPSCR(uint32_t val, uint32_t mask) 10595 * 10596 * Updates the bits specified by "mask", setting them to the values in "val". 10597 */ 10598setFPSCR: 10599 and r0, r0, r1 @ make sure no stray bits are set 10600 fmrx r2, fpscr @ get VFP reg 10601 mvn r1, r1 @ bit-invert mask 10602 and r2, r2, r1 @ clear masked bits 10603 orr r2, r2, r0 @ set specified bits 10604 fmxr fpscr, r2 @ set VFP reg 10605 mov r0, r2 @ return new value 10606 bx lr 10607 10608 .align 2 10609 .global dvmConfigureFP 10610 .type dvmConfigureFP, %function 10611dvmConfigureFP: 10612 stmfd sp!, {ip, lr} 10613 /* 0x03000000 sets DN/FZ */ 10614 /* 0x00009f00 clears the six exception enable flags */ 10615 bl common_squeak0 10616 mov r0, #0x03000000 @ r0<- 0x03000000 10617 add r1, r0, #0x9f00 @ r1<- 0x03009f00 10618 bl setFPSCR 10619 ldmfd sp!, {ip, pc} 10620#endif 10621 10622 10623/* 10624 * String references, must be close to the code that uses them. 
10625 */ 10626 .align 2 10627strArithmeticException: 10628 .word .LstrArithmeticException 10629strArrayIndexException: 10630 .word .LstrArrayIndexException 10631strArrayStoreException: 10632 .word .LstrArrayStoreException 10633strDivideByZero: 10634 .word .LstrDivideByZero 10635strNegativeArraySizeException: 10636 .word .LstrNegativeArraySizeException 10637strNoSuchMethodError: 10638 .word .LstrNoSuchMethodError 10639strNullPointerException: 10640 .word .LstrNullPointerException 10641 10642strLogTag: 10643 .word .LstrLogTag 10644strExceptionNotCaughtLocally: 10645 .word .LstrExceptionNotCaughtLocally 10646 10647strNewline: 10648 .word .LstrNewline 10649strSqueak: 10650 .word .LstrSqueak 10651strPrintHex: 10652 .word .LstrPrintHex 10653strPrintLong: 10654 .word .LstrPrintLong 10655 10656/* 10657 * Zero-terminated ASCII string data. 10658 * 10659 * On ARM we have two choices: do like gcc does, and LDR from a .word 10660 * with the address, or use an ADR pseudo-op to get the address 10661 * directly. ADR saves 4 bytes and an indirection, but it's using a 10662 * PC-relative addressing mode and hence has a limited range, which 10663 * makes it not work well with mergeable string sections. 
10664 */ 10665 .section .rodata.str1.4,"aMS",%progbits,1 10666 10667.LstrBadEntryPoint: 10668 .asciz "Bad entry point %d\n" 10669.LstrArithmeticException: 10670 .asciz "Ljava/lang/ArithmeticException;" 10671.LstrArrayIndexException: 10672 .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;" 10673.LstrArrayStoreException: 10674 .asciz "Ljava/lang/ArrayStoreException;" 10675.LstrClassCastException: 10676 .asciz "Ljava/lang/ClassCastException;" 10677.LstrDivideByZero: 10678 .asciz "divide by zero" 10679.LstrFilledNewArrayNotImpl: 10680 .asciz "filled-new-array only implemented for objects and 'int'" 10681.LstrInternalError: 10682 .asciz "Ljava/lang/InternalError;" 10683.LstrInstantiationError: 10684 .asciz "Ljava/lang/InstantiationError;" 10685.LstrNegativeArraySizeException: 10686 .asciz "Ljava/lang/NegativeArraySizeException;" 10687.LstrNoSuchMethodError: 10688 .asciz "Ljava/lang/NoSuchMethodError;" 10689.LstrNullPointerException: 10690 .asciz "Ljava/lang/NullPointerException;" 10691 10692.LstrLogTag: 10693 .asciz "mterp" 10694.LstrExceptionNotCaughtLocally: 10695 .asciz "Exception %s from %s:%d not caught locally\n" 10696 10697.LstrNewline: 10698 .asciz "\n" 10699.LstrSqueak: 10700 .asciz "<%d>" 10701.LstrPrintHex: 10702 .asciz "<0x%x>" 10703.LstrPrintLong: 10704 .asciz "<%lld>" 10705 10706 10707