/*
 * This file was generated automatically by gen-mterp.py for 'armv4t'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-q3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg   nick     purpose
  r4    rPC      interpreted program counter, used for fetching instructions
  r5    rFP      interpreted frame pointer, used for accessing locals and args
  r6    rGLUE    MterpGlue pointer
  r7    rINST    first 16-bit code unit of current instruction
  r8    rIBASE   interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 */
#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
    ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh    rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb    _reg, [rPC, #(_count*2+_byte)]

/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6
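/*
 * The macros above implement a classic fetch/decode/dispatch step: handlers
 * are laid out 64 bytes apart (".balign 64" below), so GOTO_OPCODE reaches
 * handler N with a single "add pc, rIBASE, N, lsl #6".  A minimal C sketch
 * of the same idea -- handler_table and dispatch_once are illustrative names
 * only, not Dalvik's actual C interpreter:
 *
 *   typedef unsigned short u2;
 *   typedef void (*Handler)(void);
 *   extern Handler handler_table[256];      // plays the role of rIBASE
 *
 *   static void dispatch_once(const u2 *pc)
 *   {
 *       u2 inst = *pc;                       // FETCH_INST()
 *       unsigned opcode = inst & 0xff;       // GET_INST_OPCODE(ip)
 *       handler_table[opcode]();             // GOTO_OPCODE(ip)
 *   }
 */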

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

#if defined(WITH_JIT)
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
    add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"

#if defined(WITH_JIT)
#include "../common/jit-config.h"
#endif

/* File: armv5te/platform.S */
/*
 * ===========================================================================
 * CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm


/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.  From this it figures out how to restore
 * the registers, including PC, for the previous stack frame.  If gdb
 * sees a non-function label, it stops scanning, so either we need to
 * have nothing but assembler-local labels between the entry point and
 * the break, or we need to fake it out.
 *
 * When this is defined, we add some stuff to make gdb less confused.
 */
#define ASSIST_DEBUGGER 1

    .text
    .align  2
    .global dvmMterpStdRun
    .type   dvmMterpStdRun, %function

/*
 * On entry:
 *  r0  MterpGlue* glue
 *
 * This function returns a boolean "changeInterp" value.  The return comes
 * via a call to dvmMterpStdBail().
 */
dvmMterpStdRun:
#define MTERP_ENTRY1 \
    .save {r4-r10,fp,lr}; \
    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
#define MTERP_ENTRY2 \
    .pad    #4; \
    sub     sp, sp, #4                  @ align 64

    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2

    /* save stack pointer, add magic word for debuggerd */
    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return

    /* set up "named" registers, figure out entry point */
    mov     rGLUE, r0                   @ set rGLUE
    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
    cmp     r1, #kInterpEntryInstr      @ usual case?
    bne     .Lnot_instr                 @ no, handle it

#if defined(WITH_JIT)
.LentryInstr:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    /* Entry is always a possible trace start */
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    mov     r1, #0                      @ prepare the value for the new state
    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    /* start executing the instruction at rPC */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.Lnot_instr:
    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
    beq     common_returnFromMethod

.Lnot_return:
    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
    beq     common_exceptionThrown

#if defined(WITH_JIT)
.Lnot_throw:
    ldr     r10,[rGLUE, #offGlue_jitResumeNPC]
    ldr     r2,[rGLUE, #offGlue_jitResumeDPC]
    cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
    bne     .Lbad_arg
    cmp     rPC,r2
    bne     .LentryInstr                @ must have branched, don't resume
#if defined(WITH_SELF_VERIFICATION)
    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
    b       jitSVShadowRunStart         @ re-enter the translation after the
                                        @ single-stepped instruction
                                        @noreturn
#endif
    mov     r1, #kInterpEntryInstr
    str     r1, [rGLUE, #offGlue_entryPoint]
    bx      r10                         @ re-enter the translation
#endif

.Lbad_arg:
    ldr     r0, strBadEntryPoint
    @ r1 holds value of entryPoint
    bl      printf
    bl      dvmAbort
    .fnend


    .global dvmMterpStdBail
    .type   dvmMterpStdBail, %function

/*
 * Restore the stack pointer and PC from the save point established on entry.
 * This is essentially the same as a longjmp, but should be cheaper.  The
 * last instruction causes us to return to whoever called dvmMterpStdRun.
 *
 * We pushed some registers on the stack in dvmMterpStdRun, then saved
 * SP and LR.  Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  r0  MterpGlue* glue
 *  r1  bool changeInterp
 */
dvmMterpStdBail:
    ldr     sp, [r0, #offGlue_bailPtr]  @ sp<- saved SP
    mov     r0, r1                      @ return the changeInterp value
    add     sp, sp, #4                  @ un-align 64
    LDMFD_PC "r4-r10,fp"                @ restore 9 regs and return

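/*
 * dvmMterpStdRun and dvmMterpStdBail work together like setjmp/longjmp:
 * the entry code stashes SP in glue->bailPtr, and the bail code reloads that
 * SP and pops the same register set, so control returns to whoever called
 * dvmMterpStdRun with the changeInterp value in r0.  A rough C analogy,
 * using setjmp/longjmp and a hypothetical Glue type (illustrative only; the
 * real code uses the asm above precisely because it is cheaper than setjmp):
 *
 *   #include <setjmp.h>
 *   #include <stdbool.h>
 *
 *   typedef struct { jmp_buf bailPoint; } Glue;
 *
 *   bool stdRun(Glue *glue) {
 *       int v = setjmp(glue->bailPoint);     // like "str sp, [r0, #offGlue_bailPtr]"
 *       if (v != 0)
 *           return v - 1;                    // changeInterp handed over by stdBail
 *       // ... interpret until something bails ...
 *       return false;
 *   }
 *
 *   void stdBail(Glue *glue, bool changeInterp) {
 *       longjmp(glue->bailPoint, changeInterp + 1);  // like "ldr sp, ..." + LDMFD_PC
 *   }
 */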
/*
 * String references.
 */
strBadEntryPoint:
    .word   .LstrBadEntryPoint



    .global dvmAsmInstructionStart
    .type   dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
    .text

/* ------------------------------ */
    .balign 64
.L_OP_NOP: /* 0x00 */
/* File: armv5te/OP_NOP.S */
    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    .type   dalvik_inst, %function
dalvik_inst:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
    .fnend
#endif


/* ------------------------------ */
    .balign 64
.L_OP_MOVE: /* 0x01 */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_FROM16: /* 0x02 */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_16: /* 0x03 */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE: /* 0x04 */
/* File: armv5te/OP_MOVE_WIDE.S */
    /* move-wide vA, vB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r2, r2, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
    /* move-wide/from16 vAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 1)                        @ r3<- BBBB
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_16: /* 0x06 */
/* File: armv5te/OP_MOVE_WIDE_16.S */
    /* move-wide/16 vAAAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 2)                        @ r3<- BBBB
    FETCH(r2, 1)                        @ r2<- AAAA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AAAA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT: /* 0x07 */
/* File: armv5te/OP_MOVE_OBJECT.S */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction



/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_16: /* 0x09 */
/* File: armv5te/OP_MOVE_OBJECT_16.S */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT: /* 0x0a */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
    /* move-result-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_MOVE_EXCEPTION: /* 0x0d */
/* File: armv5te/OP_MOVE_EXCEPTION.S */
    /* move-exception vAA */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    mov     r2, rINST, lsr #8           @ r2<- AA
    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
    mov     r1, #0                      @ r1<- 0
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_RETURN_VOID: /* 0x0e */
/* File: armv5te/OP_RETURN_VOID.S */
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_RETURN: /* 0x0f */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_RETURN_WIDE: /* 0x10 */
/* File: armv5te/OP_RETURN_WIDE.S */
    /*
     * Return a 64-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     */
    /* return-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
    stmia   r3, {r0-r1}                 @ retval<- r0/r1
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_RETURN_OBJECT: /* 0x11 */
/* File: armv5te/OP_RETURN_OBJECT.S */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod



/* ------------------------------ */
    .balign 64
.L_OP_CONST_4: /* 0x12 */
/* File: armv5te/OP_CONST_4.S */
    /* const/4 vA, #+B */
    mov     r1, rINST, lsl #16          @ r1<- Bxxx0000
    mov     r0, rINST, lsr #8           @ r0<- A+
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r1, asr #28             @ r1<- sssssssB (sign-extended)
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r1, r0)                    @ fp[A]<- r1
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_16: /* 0x13 */
/* File: armv5te/OP_CONST_16.S */
    /* const/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST: /* 0x14 */
/* File: armv5te/OP_CONST.S */
    /* const vAA, #+BBBBbbbb */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_HIGH16: /* 0x15 */
/* File: armv5te/OP_CONST_HIGH16.S */
    /* const/high16 vAA, #+BBBB0000 */
    FETCH(r0, 1)                        @ r0<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, r0, lsl #16             @ r0<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_16: /* 0x16 */
/* File: armv5te/OP_CONST_WIDE_16.S */
    /* const-wide/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r1, r0, asr #31             @ r1<- ssssssss
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_32: /* 0x17 */
/* File: armv5te/OP_CONST_WIDE_32.S */
    /* const-wide/32 vAA, #+BBBBbbbb */
    FETCH(r0, 1)                        @ r0<- 0000bbbb (low)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_S(r2, 2)                      @ r2<- ssssBBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    mov     r1, r0, asr #31             @ r1<- ssssssss
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

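/*
 * const-wide (next) extends the pattern used by const and const-wide/32
 * above: wider literals are assembled from consecutive 16-bit code units,
 * low unit first.  A C sketch of the 64-bit case, with illustrative names
 * (u2/s8 and fetch_wide_literal are not taken from the real sources):
 *
 *   typedef unsigned short u2;
 *   typedef long long s8;
 *
 *   static s8 fetch_wide_literal(const u2 *pc)   // pc points at the const-wide op
 *   {
 *       unsigned int lo = pc[1] | ((unsigned int)pc[2] << 16);   // bbbb, BBBB
 *       unsigned int hi = pc[3] | ((unsigned int)pc[4] << 16);   // hhhh, HHHH
 *       return (s8)(((unsigned long long)hi << 32) | lo);
 *   }
 */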
/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE: /* 0x18 */
/* File: armv5te/OP_CONST_WIDE.S */
    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (low middle)
    FETCH(r2, 3)                        @ r2<- hhhh (high middle)
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
    FETCH(r3, 4)                        @ r3<- HHHH (high)
    mov     r9, rINST, lsr #8           @ r9<- AA
    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
    /* const-wide/high16 vAA, #+BBBB000000000000 */
    FETCH(r1, 1)                        @ r1<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, #0                      @ r0<- 00000000
    mov     r1, r1, lsl #16             @ r1<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING: /* 0x1a */
/* File: armv5te/OP_CONST_STRING.S */
    /* const/string vAA, String@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_resolve
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING_JUMBO: /* 0x1b */
/* File: armv5te/OP_CONST_STRING_JUMBO.S */
    /* const/string vAA, String@BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0
    beq     .LOP_CONST_STRING_JUMBO_resolve
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_CLASS: /* 0x1c */
/* File: armv5te/OP_CONST_CLASS.S */
    /* const/class vAA, Class@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- dvmDex->pResClasses
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_CLASS_resolve
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

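/*
 * The const/string, const/string-jumbo and const/class handlers above share
 * one pattern: index the per-DEX resolved-object cache and only drop into a
 * resolver on a cache miss (the .L*_resolve paths they branch to).  A C
 * sketch of that fast path; resolved_strings and resolve_string are
 * hypothetical stand-ins for dvmDex->pResStrings and the slow-path helper:
 *
 *   struct StringObject;
 *   extern struct StringObject *resolved_strings[];     // like pResStrings
 *   struct StringObject *resolve_string(unsigned idx);  // slow path, may throw
 *
 *   static struct StringObject *const_string(unsigned idx)
 *   {
 *       struct StringObject *s = resolved_strings[idx]; // ldr r0, [r2, r1, lsl #2]
 *       if (s == 0)                                      // cmp r0, #0
 *           s = resolve_string(idx);                     // beq .L..._resolve
 *       return s;
 *   }
 */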
/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_ENTER: /* 0x1d */
/* File: armv5te/OP_MONITOR_ENTER.S */
    /*
     * Synchronize on an object.
     */
    /* monitor-enter vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
    beq     common_errNullObject        @ null object, throw an exception
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      dvmLockObject               @ call(self, obj)
#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r1, [r0, #offThread_exception]  @ check for exception
    cmp     r1, #0
    bne     common_exceptionThrown      @ exception raised, bail out
#endif
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_EXIT: /* 0x1e */
/* File: armv5te/OP_MONITOR_EXIT.S */
    /*
     * Unlock an object.
     *
     * Exceptions that occur when unlocking a monitor need to appear as
     * if they happened at the following instruction.  See the Dalvik
     * instruction spec.
     */
    /* monitor-exit vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    EXPORT_PC()                         @ before fetch: export the PC
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    cmp     r1, #0                      @ null object?
    beq     1f                          @ yes
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
    cmp     r0, #0                      @ failed?
    FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
    beq     common_exceptionThrown      @ yes, exception is pending
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
1:
    FETCH_ADVANCE_INST(1)               @ advance before throw
    b       common_errNullObject


/* ------------------------------ */
    .balign 64
.L_OP_CHECK_CAST: /* 0x1f */
/* File: armv5te/OP_CHECK_CAST.S */
    /*
     * Check to see if a cast from one class to another is allowed.
     */
    /* check-cast vAA, class@BBBB */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r2, 1)                        @ r2<- BBBB
    GET_VREG(r9, r3)                    @ r9<- object
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
    cmp     r9, #0                      @ is object null?
    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
    beq     .LOP_CHECK_CAST_okay        @ null obj, cast always succeeds
    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_CHECK_CAST_resolve     @ not resolved, do it now
.LOP_CHECK_CAST_resolved:
    cmp     r0, r1                      @ same class (trivial success)?
    bne     .LOP_CHECK_CAST_fullcheck   @ no, do full check
.LOP_CHECK_CAST_okay:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

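/*
 * check-cast above and instance-of below share a fast path: a null reference
 * settles the question immediately, and when obj->clazz is exactly the
 * already-resolved class there is no need for the full subtype walk.  A C
 * sketch of the instance-of shape, with hypothetical type and helper names:
 *
 *   struct ClassObject;
 *   struct Object { struct ClassObject *clazz; };
 *   int full_instanceof_check(const struct ClassObject *obj_clazz,
 *                             const struct ClassObject *want);   // slow path
 *
 *   static int is_instance_of(const struct Object *obj,
 *                             const struct ClassObject *want)
 *   {
 *       if (obj == 0)
 *           return 0;                     // null: not an instance
 *       if (obj->clazz == want)
 *           return 1;                     // trivial success
 *       return full_instanceof_check(obj->clazz, want);
 *   }
 */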
/* ------------------------------ */
    .balign 64
.L_OP_INSTANCE_OF: /* 0x20 */
/* File: armv5te/OP_INSTANCE_OF.S */
    /*
     * Check to see if an object reference is an instance of a class.
     *
     * Most common situation is a non-null object, being compared against
     * an already-resolved class.
     */
    /* instance-of vA, vB, class@CCCC */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    and     r9, r9, #15                 @ r9<- A
    cmp     r0, #0                      @ is object null?
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
    beq     .LOP_INSTANCE_OF_store      @ null obj, not an instance, store r0
    FETCH(r3, 1)                        @ r3<- CCCC
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_INSTANCE_OF_resolve    @ not resolved, do it now
.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
    cmp     r0, r1                      @ same class (trivial success)?
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

/* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv5te/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    mov     r1, rINST, lsr #12          @ r1<- B
    mov     r2, rINST, lsr #8           @ r2<- A+
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    and     r2, r2, #15                 @ r2<- A
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vB<- length
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved: @ r0=class
    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

/* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


/* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /* fill-array-data vAA, +BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
    EXPORT_PC();
    bl      dvmInterpHandleFillArrayData    @ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    mov     r9, r9, lsl #1              @ r9<- byte offset
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

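/*
 * goto and goto/16 above (and goto/32 below) all encode the branch distance
 * as a signed count of 16-bit code units; the handlers double it into a byte
 * offset ("lsl #1"/"asl #1") before adjusting rPC, and route negative
 * offsets through common_backwardBranch so periodic checks still run.  A C
 * sketch of the target computation (names illustrative; u2* arithmetic
 * supplies the doubling):
 *
 *   typedef unsigned short u2;
 *
 *   static const u2 *branch_target(const u2 *pc, int code_unit_offset)
 *   {
 *       return pc + code_unit_offset;    // byte offset = 2 * code units
 *   }
 */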
/* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

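/*
 * As the comment above spells out, cmpl-float and cmpg-float differ only in
 * the value produced when an operand is NaN (the "naninst"): -1 for the "l"
 * flavor, +1 for the "g" flavor.  A C sketch of the result being computed
 * (illustrative, not the actual helper the handlers call):
 *
 *   static int cmp_float(float x, float y, int nan_result)
 *   {
 *       if (x == y) return 0;
 *       if (x < y)  return -1;
 *       if (x > y)  return 1;
 *       return nan_result;    // -1 for cmpl-float, +1 for cmpg-float
 *   }
 */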
/* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: armv5te/OP_CMPG_FLOAT.S */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: armv5te/OP_CMPG_DOUBLE.S */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *   subs    ip, r0, r2
     *   sbcs    ip, r1, r3
     *   subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

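/*
 * The cmp-long comment above weighs two implementations; the semantics being
 * implemented are a signed 64-bit three-way compare, decomposed into a
 * signed compare of the high words and, if those match, an unsigned compare
 * of the low words.  A C sketch of that decomposition (names illustrative):
 *
 *   static int cmp_long(long long x, long long y)
 *   {
 *       int xh = (int)(x >> 32), yh = (int)(y >> 32);
 *       unsigned int xl = (unsigned int)x, yl = (unsigned int)y;
 *
 *       if (xh < yh) return -1;      // signed compare on high part (blt/bgt)
 *       if (xh > yh) return 1;
 *       if (xl < yl) return -1;      // unsigned compare on low part (bhi/bne)
 *       if (xl > yl) return 1;
 *       return 0;
 *   }
 */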
/* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv5te/OP_IF_EQ.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv5te/OP_IF_NE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r0<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

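/*
 * Every if-cmp handler here is stamped out from the bincmp template above:
 * branch on the *reverse* condition to the local "1:" label for the
 * not-taken case (r9 keeps the value 4, the byte width of the 2-code-unit
 * if-cmp instruction itself), otherwise fetch the signed +CCCC offset and
 * double it into a byte offset.  A C sketch of that control flow, with
 * illustrative names:
 *
 *   typedef unsigned short u2;
 *   typedef short s2;
 *
 *   static const u2 *if_cmp_next(const u2 *pc, int condition_holds)
 *   {
 *       int code_units = 2;              // not taken: skip the if-cmp itself
 *       if (condition_holds)
 *           code_units = (s2)pc[1];      // taken: signed CCCC, in code units
 *       return pc + code_units;          // u2* arithmetic doubles to bytes
 *   }
 */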
1616 * 1617 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1618 */ 1619 /* if-cmp vA, vB, +CCCC */ 1620 mov r0, rINST, lsr #8 @ r0<- A+ 1621 mov r1, rINST, lsr #12 @ r1<- B 1622 and r0, r0, #15 1623 GET_VREG(r3, r1) @ r3<- vB 1624 GET_VREG(r2, r0) @ r2<- vA 1625 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1626 cmp r2, r3 @ compare (vA, vB) 1627 bge 1f @ branch to 1 if comparison failed 1628 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1629 movs r9, r9, asl #1 @ convert to bytes, check sign 1630 bmi common_backwardBranch @ yes, do periodic checks 16311: 1632#if defined(WITH_JIT) 1633 GET_JIT_PROF_TABLE(r0) 1634 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1635 b common_testUpdateProfile 1636#else 1637 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1638 GET_INST_OPCODE(ip) @ extract opcode from rINST 1639 GOTO_OPCODE(ip) @ jump to next instruction 1640#endif 1641 1642 1643 1644/* ------------------------------ */ 1645 .balign 64 1646.L_OP_IF_GE: /* 0x35 */ 1647/* File: armv5te/OP_IF_GE.S */ 1648/* File: armv5te/bincmp.S */ 1649 /* 1650 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1651 * fragment that specifies the *reverse* comparison to perform, e.g. 1652 * for "if-le" you would use "gt". 1653 * 1654 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1655 */ 1656 /* if-cmp vA, vB, +CCCC */ 1657 mov r0, rINST, lsr #8 @ r0<- A+ 1658 mov r1, rINST, lsr #12 @ r1<- B 1659 and r0, r0, #15 1660 GET_VREG(r3, r1) @ r3<- vB 1661 GET_VREG(r2, r0) @ r2<- vA 1662 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1663 cmp r2, r3 @ compare (vA, vB) 1664 blt 1f @ branch to 1 if comparison failed 1665 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1666 movs r9, r9, asl #1 @ convert to bytes, check sign 1667 bmi common_backwardBranch @ yes, do periodic checks 16681: 1669#if defined(WITH_JIT) 1670 GET_JIT_PROF_TABLE(r0) 1671 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1672 b common_testUpdateProfile 1673#else 1674 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1675 GET_INST_OPCODE(ip) @ extract opcode from rINST 1676 GOTO_OPCODE(ip) @ jump to next instruction 1677#endif 1678 1679 1680 1681/* ------------------------------ */ 1682 .balign 64 1683.L_OP_IF_GT: /* 0x36 */ 1684/* File: armv5te/OP_IF_GT.S */ 1685/* File: armv5te/bincmp.S */ 1686 /* 1687 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1688 * fragment that specifies the *reverse* comparison to perform, e.g. 1689 * for "if-le" you would use "gt". 
1690 * 1691 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1692 */ 1693 /* if-cmp vA, vB, +CCCC */ 1694 mov r0, rINST, lsr #8 @ r0<- A+ 1695 mov r1, rINST, lsr #12 @ r1<- B 1696 and r0, r0, #15 1697 GET_VREG(r3, r1) @ r3<- vB 1698 GET_VREG(r2, r0) @ r2<- vA 1699 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1700 cmp r2, r3 @ compare (vA, vB) 1701 ble 1f @ branch to 1 if comparison failed 1702 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1703 movs r9, r9, asl #1 @ convert to bytes, check sign 1704 bmi common_backwardBranch @ yes, do periodic checks 17051: 1706#if defined(WITH_JIT) 1707 GET_JIT_PROF_TABLE(r0) 1708 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1709 b common_testUpdateProfile 1710#else 1711 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1712 GET_INST_OPCODE(ip) @ extract opcode from rINST 1713 GOTO_OPCODE(ip) @ jump to next instruction 1714#endif 1715 1716 1717 1718/* ------------------------------ */ 1719 .balign 64 1720.L_OP_IF_LE: /* 0x37 */ 1721/* File: armv5te/OP_IF_LE.S */ 1722/* File: armv5te/bincmp.S */ 1723 /* 1724 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1725 * fragment that specifies the *reverse* comparison to perform, e.g. 1726 * for "if-le" you would use "gt". 1727 * 1728 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1729 */ 1730 /* if-cmp vA, vB, +CCCC */ 1731 mov r0, rINST, lsr #8 @ r0<- A+ 1732 mov r1, rINST, lsr #12 @ r1<- B 1733 and r0, r0, #15 1734 GET_VREG(r3, r1) @ r3<- vB 1735 GET_VREG(r2, r0) @ r2<- vA 1736 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1737 cmp r2, r3 @ compare (vA, vB) 1738 bgt 1f @ branch to 1 if comparison failed 1739 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1740 movs r9, r9, asl #1 @ convert to bytes, check sign 1741 bmi common_backwardBranch @ yes, do periodic checks 17421: 1743#if defined(WITH_JIT) 1744 GET_JIT_PROF_TABLE(r0) 1745 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1746 b common_testUpdateProfile 1747#else 1748 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1749 GET_INST_OPCODE(ip) @ extract opcode from rINST 1750 GOTO_OPCODE(ip) @ jump to next instruction 1751#endif 1752 1753 1754 1755/* ------------------------------ */ 1756 .balign 64 1757.L_OP_IF_EQZ: /* 0x38 */ 1758/* File: armv5te/OP_IF_EQZ.S */ 1759/* File: armv5te/zcmp.S */ 1760 /* 1761 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1762 * fragment that specifies the *reverse* comparison to perform, e.g. 1763 * for "if-le" you would use "gt". 
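     *
     * This is the one-operand twin of the bincmp sequence above: the same
     * taken/not-taken logic, with the second operand fixed at zero.  For
     * if-eqz, roughly: "if (vAA == 0) rPC += 2*(s2)FETCH(1); else rPC += 4".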
1764 * 1765 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1766 */ 1767 /* if-cmp vAA, +BBBB */ 1768 mov r0, rINST, lsr #8 @ r0<- AA 1769 GET_VREG(r2, r0) @ r2<- vAA 1770 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1771 cmp r2, #0 @ compare (vA, 0) 1772 bne 1f @ branch to 1 if comparison failed 1773 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1774 movs r9, r9, asl #1 @ convert to bytes, check sign 1775 bmi common_backwardBranch @ backward branch, do periodic checks 17761: 1777#if defined(WITH_JIT) 1778 GET_JIT_PROF_TABLE(r0) 1779 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1780 cmp r0,#0 1781 bne common_updateProfile 1782 GET_INST_OPCODE(ip) @ extract opcode from rINST 1783 GOTO_OPCODE(ip) @ jump to next instruction 1784#else 1785 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1786 GET_INST_OPCODE(ip) @ extract opcode from rINST 1787 GOTO_OPCODE(ip) @ jump to next instruction 1788#endif 1789 1790 1791 1792/* ------------------------------ */ 1793 .balign 64 1794.L_OP_IF_NEZ: /* 0x39 */ 1795/* File: armv5te/OP_IF_NEZ.S */ 1796/* File: armv5te/zcmp.S */ 1797 /* 1798 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1799 * fragment that specifies the *reverse* comparison to perform, e.g. 1800 * for "if-le" you would use "gt". 1801 * 1802 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1803 */ 1804 /* if-cmp vAA, +BBBB */ 1805 mov r0, rINST, lsr #8 @ r0<- AA 1806 GET_VREG(r2, r0) @ r2<- vAA 1807 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1808 cmp r2, #0 @ compare (vA, 0) 1809 beq 1f @ branch to 1 if comparison failed 1810 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1811 movs r9, r9, asl #1 @ convert to bytes, check sign 1812 bmi common_backwardBranch @ backward branch, do periodic checks 18131: 1814#if defined(WITH_JIT) 1815 GET_JIT_PROF_TABLE(r0) 1816 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1817 cmp r0,#0 1818 bne common_updateProfile 1819 GET_INST_OPCODE(ip) @ extract opcode from rINST 1820 GOTO_OPCODE(ip) @ jump to next instruction 1821#else 1822 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1823 GET_INST_OPCODE(ip) @ extract opcode from rINST 1824 GOTO_OPCODE(ip) @ jump to next instruction 1825#endif 1826 1827 1828 1829/* ------------------------------ */ 1830 .balign 64 1831.L_OP_IF_LTZ: /* 0x3a */ 1832/* File: armv5te/OP_IF_LTZ.S */ 1833/* File: armv5te/zcmp.S */ 1834 /* 1835 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1836 * fragment that specifies the *reverse* comparison to perform, e.g. 1837 * for "if-le" you would use "gt". 
1838 * 1839 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1840 */ 1841 /* if-cmp vAA, +BBBB */ 1842 mov r0, rINST, lsr #8 @ r0<- AA 1843 GET_VREG(r2, r0) @ r2<- vAA 1844 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1845 cmp r2, #0 @ compare (vA, 0) 1846 bge 1f @ branch to 1 if comparison failed 1847 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1848 movs r9, r9, asl #1 @ convert to bytes, check sign 1849 bmi common_backwardBranch @ backward branch, do periodic checks 18501: 1851#if defined(WITH_JIT) 1852 GET_JIT_PROF_TABLE(r0) 1853 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1854 cmp r0,#0 1855 bne common_updateProfile 1856 GET_INST_OPCODE(ip) @ extract opcode from rINST 1857 GOTO_OPCODE(ip) @ jump to next instruction 1858#else 1859 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1860 GET_INST_OPCODE(ip) @ extract opcode from rINST 1861 GOTO_OPCODE(ip) @ jump to next instruction 1862#endif 1863 1864 1865 1866/* ------------------------------ */ 1867 .balign 64 1868.L_OP_IF_GEZ: /* 0x3b */ 1869/* File: armv5te/OP_IF_GEZ.S */ 1870/* File: armv5te/zcmp.S */ 1871 /* 1872 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1873 * fragment that specifies the *reverse* comparison to perform, e.g. 1874 * for "if-le" you would use "gt". 1875 * 1876 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1877 */ 1878 /* if-cmp vAA, +BBBB */ 1879 mov r0, rINST, lsr #8 @ r0<- AA 1880 GET_VREG(r2, r0) @ r2<- vAA 1881 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1882 cmp r2, #0 @ compare (vA, 0) 1883 blt 1f @ branch to 1 if comparison failed 1884 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1885 movs r9, r9, asl #1 @ convert to bytes, check sign 1886 bmi common_backwardBranch @ backward branch, do periodic checks 18871: 1888#if defined(WITH_JIT) 1889 GET_JIT_PROF_TABLE(r0) 1890 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1891 cmp r0,#0 1892 bne common_updateProfile 1893 GET_INST_OPCODE(ip) @ extract opcode from rINST 1894 GOTO_OPCODE(ip) @ jump to next instruction 1895#else 1896 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1897 GET_INST_OPCODE(ip) @ extract opcode from rINST 1898 GOTO_OPCODE(ip) @ jump to next instruction 1899#endif 1900 1901 1902 1903/* ------------------------------ */ 1904 .balign 64 1905.L_OP_IF_GTZ: /* 0x3c */ 1906/* File: armv5te/OP_IF_GTZ.S */ 1907/* File: armv5te/zcmp.S */ 1908 /* 1909 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1910 * fragment that specifies the *reverse* comparison to perform, e.g. 1911 * for "if-le" you would use "gt". 
1912 * 1913 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1914 */ 1915 /* if-cmp vAA, +BBBB */ 1916 mov r0, rINST, lsr #8 @ r0<- AA 1917 GET_VREG(r2, r0) @ r2<- vAA 1918 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1919 cmp r2, #0 @ compare (vA, 0) 1920 ble 1f @ branch to 1 if comparison failed 1921 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1922 movs r9, r9, asl #1 @ convert to bytes, check sign 1923 bmi common_backwardBranch @ backward branch, do periodic checks 19241: 1925#if defined(WITH_JIT) 1926 GET_JIT_PROF_TABLE(r0) 1927 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1928 cmp r0,#0 1929 bne common_updateProfile 1930 GET_INST_OPCODE(ip) @ extract opcode from rINST 1931 GOTO_OPCODE(ip) @ jump to next instruction 1932#else 1933 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1934 GET_INST_OPCODE(ip) @ extract opcode from rINST 1935 GOTO_OPCODE(ip) @ jump to next instruction 1936#endif 1937 1938 1939 1940/* ------------------------------ */ 1941 .balign 64 1942.L_OP_IF_LEZ: /* 0x3d */ 1943/* File: armv5te/OP_IF_LEZ.S */ 1944/* File: armv5te/zcmp.S */ 1945 /* 1946 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1947 * fragment that specifies the *reverse* comparison to perform, e.g. 1948 * for "if-le" you would use "gt". 1949 * 1950 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1951 */ 1952 /* if-cmp vAA, +BBBB */ 1953 mov r0, rINST, lsr #8 @ r0<- AA 1954 GET_VREG(r2, r0) @ r2<- vAA 1955 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1956 cmp r2, #0 @ compare (vA, 0) 1957 bgt 1f @ branch to 1 if comparison failed 1958 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1959 movs r9, r9, asl #1 @ convert to bytes, check sign 1960 bmi common_backwardBranch @ backward branch, do periodic checks 19611: 1962#if defined(WITH_JIT) 1963 GET_JIT_PROF_TABLE(r0) 1964 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1965 cmp r0,#0 1966 bne common_updateProfile 1967 GET_INST_OPCODE(ip) @ extract opcode from rINST 1968 GOTO_OPCODE(ip) @ jump to next instruction 1969#else 1970 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1971 GET_INST_OPCODE(ip) @ extract opcode from rINST 1972 GOTO_OPCODE(ip) @ jump to next instruction 1973#endif 1974 1975 1976 1977/* ------------------------------ */ 1978 .balign 64 1979.L_OP_UNUSED_3E: /* 0x3e */ 1980/* File: armv5te/OP_UNUSED_3E.S */ 1981/* File: armv5te/unused.S */ 1982 bl common_abort 1983 1984 1985 1986/* ------------------------------ */ 1987 .balign 64 1988.L_OP_UNUSED_3F: /* 0x3f */ 1989/* File: armv5te/OP_UNUSED_3F.S */ 1990/* File: armv5te/unused.S */ 1991 bl common_abort 1992 1993 1994 1995/* ------------------------------ */ 1996 .balign 64 1997.L_OP_UNUSED_40: /* 0x40 */ 1998/* File: armv5te/OP_UNUSED_40.S */ 1999/* File: armv5te/unused.S */ 2000 bl common_abort 2001 2002 2003 2004/* ------------------------------ */ 2005 .balign 64 2006.L_OP_UNUSED_41: /* 0x41 */ 2007/* File: armv5te/OP_UNUSED_41.S */ 2008/* File: armv5te/unused.S */ 2009 bl common_abort 2010 2011 2012 2013/* ------------------------------ */ 2014 .balign 64 2015.L_OP_UNUSED_42: /* 0x42 */ 2016/* File: armv5te/OP_UNUSED_42.S */ 2017/* File: armv5te/unused.S */ 2018 bl common_abort 2019 2020 2021 2022/* ------------------------------ */ 2023 .balign 64 2024.L_OP_UNUSED_43: /* 0x43 */ 2025/* File: armv5te/OP_UNUSED_43.S */ 2026/* File: armv5te/unused.S */ 2027 bl common_abort 2028 2029 2030 2031/* ------------------------------ */ 2032 .balign 64 2033.L_OP_AGET: /* 0x44 */ 2034/* File: armv5te/OP_AGET.S */ 2035 /* 2036 * 
Array get, 32 bits or less. vAA <- vBB[vCC]. 2037 * 2038 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2039 * instructions. We use a pair of FETCH_Bs instead. 2040 * 2041 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2042 */ 2043 /* op vAA, vBB, vCC */ 2044 FETCH_B(r2, 1, 0) @ r2<- BB 2045 mov r9, rINST, lsr #8 @ r9<- AA 2046 FETCH_B(r3, 1, 1) @ r3<- CC 2047 GET_VREG(r0, r2) @ r0<- vBB (array object) 2048 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2049 cmp r0, #0 @ null array object? 2050 beq common_errNullObject @ yes, bail 2051 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2052 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2053 cmp r1, r3 @ compare unsigned index, length 2054 bcs common_errArrayIndex @ index >= length, bail 2055 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2056 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2057 GET_INST_OPCODE(ip) @ extract opcode from rINST 2058 SET_VREG(r2, r9) @ vAA<- r2 2059 GOTO_OPCODE(ip) @ jump to next instruction 2060 2061 2062/* ------------------------------ */ 2063 .balign 64 2064.L_OP_AGET_WIDE: /* 0x45 */ 2065/* File: armv4t/OP_AGET_WIDE.S */ 2066 /* 2067 * Array get, 64 bits. vAA <- vBB[vCC]. 2068 * 2069 * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD. 2070 */ 2071 /* aget-wide vAA, vBB, vCC */ 2072 FETCH(r0, 1) @ r0<- CCBB 2073 mov r9, rINST, lsr #8 @ r9<- AA 2074 and r2, r0, #255 @ r2<- BB 2075 mov r3, r0, lsr #8 @ r3<- CC 2076 GET_VREG(r0, r2) @ r0<- vBB (array object) 2077 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2078 cmp r0, #0 @ null array object? 2079 beq common_errNullObject @ yes, bail 2080 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2081 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2082 cmp r1, r3 @ compare unsigned index, length 2083 bcc .LOP_AGET_WIDE_finish @ okay, continue below 2084 b common_errArrayIndex @ index >= length, bail 2085 @ May want to swap the order of these two branches depending on how the 2086 @ branch prediction (if any) handles conditional forward branches vs. 2087 @ unconditional forward branches. 2088 2089/* ------------------------------ */ 2090 .balign 64 2091.L_OP_AGET_OBJECT: /* 0x46 */ 2092/* File: armv5te/OP_AGET_OBJECT.S */ 2093/* File: armv5te/OP_AGET.S */ 2094 /* 2095 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2096 * 2097 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2098 * instructions. We use a pair of FETCH_Bs instead. 2099 * 2100 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2101 */ 2102 /* op vAA, vBB, vCC */ 2103 FETCH_B(r2, 1, 0) @ r2<- BB 2104 mov r9, rINST, lsr #8 @ r9<- AA 2105 FETCH_B(r3, 1, 1) @ r3<- CC 2106 GET_VREG(r0, r2) @ r0<- vBB (array object) 2107 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2108 cmp r0, #0 @ null array object? 
2109 beq common_errNullObject @ yes, bail 2110 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2111 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2112 cmp r1, r3 @ compare unsigned index, length 2113 bcs common_errArrayIndex @ index >= length, bail 2114 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2115 ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2116 GET_INST_OPCODE(ip) @ extract opcode from rINST 2117 SET_VREG(r2, r9) @ vAA<- r2 2118 GOTO_OPCODE(ip) @ jump to next instruction 2119 2120 2121 2122/* ------------------------------ */ 2123 .balign 64 2124.L_OP_AGET_BOOLEAN: /* 0x47 */ 2125/* File: armv5te/OP_AGET_BOOLEAN.S */ 2126/* File: armv5te/OP_AGET.S */ 2127 /* 2128 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2129 * 2130 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2131 * instructions. We use a pair of FETCH_Bs instead. 2132 * 2133 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2134 */ 2135 /* op vAA, vBB, vCC */ 2136 FETCH_B(r2, 1, 0) @ r2<- BB 2137 mov r9, rINST, lsr #8 @ r9<- AA 2138 FETCH_B(r3, 1, 1) @ r3<- CC 2139 GET_VREG(r0, r2) @ r0<- vBB (array object) 2140 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2141 cmp r0, #0 @ null array object? 2142 beq common_errNullObject @ yes, bail 2143 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2144 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2145 cmp r1, r3 @ compare unsigned index, length 2146 bcs common_errArrayIndex @ index >= length, bail 2147 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2148 ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2149 GET_INST_OPCODE(ip) @ extract opcode from rINST 2150 SET_VREG(r2, r9) @ vAA<- r2 2151 GOTO_OPCODE(ip) @ jump to next instruction 2152 2153 2154 2155/* ------------------------------ */ 2156 .balign 64 2157.L_OP_AGET_BYTE: /* 0x48 */ 2158/* File: armv5te/OP_AGET_BYTE.S */ 2159/* File: armv5te/OP_AGET.S */ 2160 /* 2161 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2162 * 2163 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2164 * instructions. We use a pair of FETCH_Bs instead. 2165 * 2166 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2167 */ 2168 /* op vAA, vBB, vCC */ 2169 FETCH_B(r2, 1, 0) @ r2<- BB 2170 mov r9, rINST, lsr #8 @ r9<- AA 2171 FETCH_B(r3, 1, 1) @ r3<- CC 2172 GET_VREG(r0, r2) @ r0<- vBB (array object) 2173 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2174 cmp r0, #0 @ null array object? 2175 beq common_errNullObject @ yes, bail 2176 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2177 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2178 cmp r1, r3 @ compare unsigned index, length 2179 bcs common_errArrayIndex @ index >= length, bail 2180 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2181 ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2182 GET_INST_OPCODE(ip) @ extract opcode from rINST 2183 SET_VREG(r2, r9) @ vAA<- r2 2184 GOTO_OPCODE(ip) @ jump to next instruction 2185 2186 2187 2188/* ------------------------------ */ 2189 .balign 64 2190.L_OP_AGET_CHAR: /* 0x49 */ 2191/* File: armv5te/OP_AGET_CHAR.S */ 2192/* File: armv5te/OP_AGET.S */ 2193 /* 2194 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2195 * 2196 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2197 * instructions. We use a pair of FETCH_Bs instead. 
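     *
     * A rough C equivalent of the sequence below (a sketch, not the
     * literal handler; "width" is the per-type element size):
     *
     *     ArrayObject* a = (ArrayObject*) vBB;
     *     u4 idx = vCC;
     *     if (a == NULL) goto nullObject;
     *     if (idx >= a->length) goto arrayIndex;  // unsigned compare also
     *                                             // rejects negative indexes
     *     vAA = *(type*)((u1*)a->contents + idx * width);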
2198 * 2199 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2200 */ 2201 /* op vAA, vBB, vCC */ 2202 FETCH_B(r2, 1, 0) @ r2<- BB 2203 mov r9, rINST, lsr #8 @ r9<- AA 2204 FETCH_B(r3, 1, 1) @ r3<- CC 2205 GET_VREG(r0, r2) @ r0<- vBB (array object) 2206 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2207 cmp r0, #0 @ null array object? 2208 beq common_errNullObject @ yes, bail 2209 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2210 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2211 cmp r1, r3 @ compare unsigned index, length 2212 bcs common_errArrayIndex @ index >= length, bail 2213 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2214 ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2215 GET_INST_OPCODE(ip) @ extract opcode from rINST 2216 SET_VREG(r2, r9) @ vAA<- r2 2217 GOTO_OPCODE(ip) @ jump to next instruction 2218 2219 2220 2221/* ------------------------------ */ 2222 .balign 64 2223.L_OP_AGET_SHORT: /* 0x4a */ 2224/* File: armv5te/OP_AGET_SHORT.S */ 2225/* File: armv5te/OP_AGET.S */ 2226 /* 2227 * Array get, 32 bits or less. vAA <- vBB[vCC]. 2228 * 2229 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2230 * instructions. We use a pair of FETCH_Bs instead. 2231 * 2232 * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short 2233 */ 2234 /* op vAA, vBB, vCC */ 2235 FETCH_B(r2, 1, 0) @ r2<- BB 2236 mov r9, rINST, lsr #8 @ r9<- AA 2237 FETCH_B(r3, 1, 1) @ r3<- CC 2238 GET_VREG(r0, r2) @ r0<- vBB (array object) 2239 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2240 cmp r0, #0 @ null array object? 2241 beq common_errNullObject @ yes, bail 2242 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2243 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2244 cmp r1, r3 @ compare unsigned index, length 2245 bcs common_errArrayIndex @ index >= length, bail 2246 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2247 ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC] 2248 GET_INST_OPCODE(ip) @ extract opcode from rINST 2249 SET_VREG(r2, r9) @ vAA<- r2 2250 GOTO_OPCODE(ip) @ jump to next instruction 2251 2252 2253 2254/* ------------------------------ */ 2255 .balign 64 2256.L_OP_APUT: /* 0x4b */ 2257/* File: armv5te/OP_APUT.S */ 2258 /* 2259 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2260 * 2261 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2262 * instructions. We use a pair of FETCH_Bs instead. 2263 * 2264 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2265 */ 2266 /* op vAA, vBB, vCC */ 2267 FETCH_B(r2, 1, 0) @ r2<- BB 2268 mov r9, rINST, lsr #8 @ r9<- AA 2269 FETCH_B(r3, 1, 1) @ r3<- CC 2270 GET_VREG(r0, r2) @ r0<- vBB (array object) 2271 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2272 cmp r0, #0 @ null array object? 2273 beq common_errNullObject @ yes, bail 2274 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2275 add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width 2276 cmp r1, r3 @ compare unsigned index, length 2277 bcs common_errArrayIndex @ index >= length, bail 2278 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2279 GET_VREG(r2, r9) @ r2<- vAA 2280 GET_INST_OPCODE(ip) @ extract opcode from rINST 2281 str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2282 GOTO_OPCODE(ip) @ jump to next instruction 2283 2284 2285/* ------------------------------ */ 2286 .balign 64 2287.L_OP_APUT_WIDE: /* 0x4c */ 2288/* File: armv4t/OP_APUT_WIDE.S */ 2289 /* 2290 * Array put, 64 bits. vBB[vCC] <- vAA. 
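     *
     * Roughly (a sketch; the actual 64-bit store is done off the fast
     * path, in the _finish fragment):
     *
     *     if (a == NULL) goto nullObject;
     *     if (idx >= a->length) goto arrayIndex;
     *     memcpy((u1*)a->contents + idx * 8, &fp[AA], 8);  // 64-bit aligned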
2291 */ 2292 /* aput-wide vAA, vBB, vCC */ 2293 FETCH(r0, 1) @ r0<- CCBB 2294 mov r9, rINST, lsr #8 @ r9<- AA 2295 and r2, r0, #255 @ r2<- BB 2296 mov r3, r0, lsr #8 @ r3<- CC 2297 GET_VREG(r0, r2) @ r0<- vBB (array object) 2298 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2299 cmp r0, #0 @ null array object? 2300 beq common_errNullObject @ yes, bail 2301 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2302 add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width 2303 cmp r1, r3 @ compare unsigned index, length 2304 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2305 bcc .LOP_APUT_WIDE_finish @ okay, continue below 2306 b common_errArrayIndex @ index >= length, bail 2307 @ May want to swap the order of these two branches depending on how the 2308 @ branch prediction (if any) handles conditional forward branches vs. 2309 @ unconditional forward branches. 2310 2311/* ------------------------------ */ 2312 .balign 64 2313.L_OP_APUT_OBJECT: /* 0x4d */ 2314/* File: armv5te/OP_APUT_OBJECT.S */ 2315 /* 2316 * Store an object into an array. vBB[vCC] <- vAA. 2317 * 2318 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2319 * instructions. We use a pair of FETCH_Bs instead. 2320 */ 2321 /* op vAA, vBB, vCC */ 2322 FETCH(r0, 1) @ r0<- CCBB 2323 mov r9, rINST, lsr #8 @ r9<- AA 2324 and r2, r0, #255 @ r2<- BB 2325 mov r3, r0, lsr #8 @ r3<- CC 2326 GET_VREG(r1, r2) @ r1<- vBB (array object) 2327 GET_VREG(r0, r3) @ r0<- vCC (requested index) 2328 cmp r1, #0 @ null array object? 2329 GET_VREG(r9, r9) @ r9<- vAA 2330 beq common_errNullObject @ yes, bail 2331 ldr r3, [r1, #offArrayObject_length] @ r3<- arrayObj->length 2332 add r10, r1, r0, lsl #2 @ r10<- arrayObj + index*width 2333 cmp r0, r3 @ compare unsigned index, length 2334 bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on 2335 b common_errArrayIndex @ index >= length, bail 2336 2337 2338/* ------------------------------ */ 2339 .balign 64 2340.L_OP_APUT_BOOLEAN: /* 0x4e */ 2341/* File: armv5te/OP_APUT_BOOLEAN.S */ 2342/* File: armv5te/OP_APUT.S */ 2343 /* 2344 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2345 * 2346 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2347 * instructions. We use a pair of FETCH_Bs instead. 2348 * 2349 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2350 */ 2351 /* op vAA, vBB, vCC */ 2352 FETCH_B(r2, 1, 0) @ r2<- BB 2353 mov r9, rINST, lsr #8 @ r9<- AA 2354 FETCH_B(r3, 1, 1) @ r3<- CC 2355 GET_VREG(r0, r2) @ r0<- vBB (array object) 2356 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2357 cmp r0, #0 @ null array object? 2358 beq common_errNullObject @ yes, bail 2359 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2360 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2361 cmp r1, r3 @ compare unsigned index, length 2362 bcs common_errArrayIndex @ index >= length, bail 2363 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2364 GET_VREG(r2, r9) @ r2<- vAA 2365 GET_INST_OPCODE(ip) @ extract opcode from rINST 2366 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2367 GOTO_OPCODE(ip) @ jump to next instruction 2368 2369 2370 2371/* ------------------------------ */ 2372 .balign 64 2373.L_OP_APUT_BYTE: /* 0x4f */ 2374/* File: armv5te/OP_APUT_BYTE.S */ 2375/* File: armv5te/OP_APUT.S */ 2376 /* 2377 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2378 * 2379 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2380 * instructions. We use a pair of FETCH_Bs instead. 
2381 * 2382 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2383 */ 2384 /* op vAA, vBB, vCC */ 2385 FETCH_B(r2, 1, 0) @ r2<- BB 2386 mov r9, rINST, lsr #8 @ r9<- AA 2387 FETCH_B(r3, 1, 1) @ r3<- CC 2388 GET_VREG(r0, r2) @ r0<- vBB (array object) 2389 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2390 cmp r0, #0 @ null array object? 2391 beq common_errNullObject @ yes, bail 2392 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2393 add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width 2394 cmp r1, r3 @ compare unsigned index, length 2395 bcs common_errArrayIndex @ index >= length, bail 2396 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2397 GET_VREG(r2, r9) @ r2<- vAA 2398 GET_INST_OPCODE(ip) @ extract opcode from rINST 2399 strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2400 GOTO_OPCODE(ip) @ jump to next instruction 2401 2402 2403 2404/* ------------------------------ */ 2405 .balign 64 2406.L_OP_APUT_CHAR: /* 0x50 */ 2407/* File: armv5te/OP_APUT_CHAR.S */ 2408/* File: armv5te/OP_APUT.S */ 2409 /* 2410 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2411 * 2412 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2413 * instructions. We use a pair of FETCH_Bs instead. 2414 * 2415 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2416 */ 2417 /* op vAA, vBB, vCC */ 2418 FETCH_B(r2, 1, 0) @ r2<- BB 2419 mov r9, rINST, lsr #8 @ r9<- AA 2420 FETCH_B(r3, 1, 1) @ r3<- CC 2421 GET_VREG(r0, r2) @ r0<- vBB (array object) 2422 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2423 cmp r0, #0 @ null array object? 2424 beq common_errNullObject @ yes, bail 2425 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2426 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2427 cmp r1, r3 @ compare unsigned index, length 2428 bcs common_errArrayIndex @ index >= length, bail 2429 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2430 GET_VREG(r2, r9) @ r2<- vAA 2431 GET_INST_OPCODE(ip) @ extract opcode from rINST 2432 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2433 GOTO_OPCODE(ip) @ jump to next instruction 2434 2435 2436 2437/* ------------------------------ */ 2438 .balign 64 2439.L_OP_APUT_SHORT: /* 0x51 */ 2440/* File: armv5te/OP_APUT_SHORT.S */ 2441/* File: armv5te/OP_APUT.S */ 2442 /* 2443 * Array put, 32 bits or less. vBB[vCC] <- vAA. 2444 * 2445 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17 2446 * instructions. We use a pair of FETCH_Bs instead. 2447 * 2448 * for: aput, aput-boolean, aput-byte, aput-char, aput-short 2449 */ 2450 /* op vAA, vBB, vCC */ 2451 FETCH_B(r2, 1, 0) @ r2<- BB 2452 mov r9, rINST, lsr #8 @ r9<- AA 2453 FETCH_B(r3, 1, 1) @ r3<- CC 2454 GET_VREG(r0, r2) @ r0<- vBB (array object) 2455 GET_VREG(r1, r3) @ r1<- vCC (requested index) 2456 cmp r0, #0 @ null array object? 2457 beq common_errNullObject @ yes, bail 2458 ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length 2459 add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width 2460 cmp r1, r3 @ compare unsigned index, length 2461 bcs common_errArrayIndex @ index >= length, bail 2462 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2463 GET_VREG(r2, r9) @ r2<- vAA 2464 GET_INST_OPCODE(ip) @ extract opcode from rINST 2465 strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2 2466 GOTO_OPCODE(ip) @ jump to next instruction 2467 2468 2469 2470/* ------------------------------ */ 2471 .balign 64 2472.L_OP_IGET: /* 0x52 */ 2473/* File: armv5te/OP_IGET.S */ 2474 /* 2475 * General 32-bit instance field get. 
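     *
     * The resolution pattern, roughly, in C (a sketch of the logic, not
     * the literal handler; the load itself happens in the _finish
     * fragment):
     *
     *     InstField* f = pDvmDex->pResFields[CCCC];
     *     if (f == NULL) {                        // slow path, once per field
     *         EXPORT_PC();                        // resolution can throw
     *         f = dvmResolveInstField(method->clazz, CCCC);
     *         if (f == NULL) goto exceptionThrown;
     *     }
     *     // _finish: null-check the object in vB, then load the 32-bit
     *     // field at f->byteOffset into vA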
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: armv4t/OP_IGET_WIDE.S */
    /*
     * 64-bit instance field get.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_WIDE_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: armv5te/OP_IGET_OBJECT.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_OBJECT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: armv5te/OP_IGET_BOOLEAN.S */
@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
2556 * 2557 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2558 */ 2559 /* op vA, vB, field@CCCC */ 2560 mov r0, rINST, lsr #12 @ r0<- B 2561 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2562 FETCH(r1, 1) @ r1<- field ref CCCC 2563 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2564 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2565 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2566 cmp r0, #0 @ is resolved entry null? 2567 bne .LOP_IGET_BOOLEAN_finish @ no, already resolved 25688: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2569 EXPORT_PC() @ resolve() could throw 2570 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2571 bl dvmResolveInstField @ r0<- resolved InstField ptr 2572 cmp r0, #0 2573 bne .LOP_IGET_BOOLEAN_finish 2574 b common_exceptionThrown 2575 2576 2577/* ------------------------------ */ 2578 .balign 64 2579.L_OP_IGET_BYTE: /* 0x56 */ 2580/* File: armv5te/OP_IGET_BYTE.S */ 2581@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" } 2582/* File: armv5te/OP_IGET.S */ 2583 /* 2584 * General 32-bit instance field get. 2585 * 2586 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2587 */ 2588 /* op vA, vB, field@CCCC */ 2589 mov r0, rINST, lsr #12 @ r0<- B 2590 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2591 FETCH(r1, 1) @ r1<- field ref CCCC 2592 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2593 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2594 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2595 cmp r0, #0 @ is resolved entry null? 2596 bne .LOP_IGET_BYTE_finish @ no, already resolved 25978: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2598 EXPORT_PC() @ resolve() could throw 2599 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2600 bl dvmResolveInstField @ r0<- resolved InstField ptr 2601 cmp r0, #0 2602 bne .LOP_IGET_BYTE_finish 2603 b common_exceptionThrown 2604 2605 2606/* ------------------------------ */ 2607 .balign 64 2608.L_OP_IGET_CHAR: /* 0x57 */ 2609/* File: armv5te/OP_IGET_CHAR.S */ 2610@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" } 2611/* File: armv5te/OP_IGET.S */ 2612 /* 2613 * General 32-bit instance field get. 2614 * 2615 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2616 */ 2617 /* op vA, vB, field@CCCC */ 2618 mov r0, rINST, lsr #12 @ r0<- B 2619 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2620 FETCH(r1, 1) @ r1<- field ref CCCC 2621 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2622 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2623 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2624 cmp r0, #0 @ is resolved entry null? 2625 bne .LOP_IGET_CHAR_finish @ no, already resolved 26268: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2627 EXPORT_PC() @ resolve() could throw 2628 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2629 bl dvmResolveInstField @ r0<- resolved InstField ptr 2630 cmp r0, #0 2631 bne .LOP_IGET_CHAR_finish 2632 b common_exceptionThrown 2633 2634 2635/* ------------------------------ */ 2636 .balign 64 2637.L_OP_IGET_SHORT: /* 0x58 */ 2638/* File: armv5te/OP_IGET_SHORT.S */ 2639@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" } 2640/* File: armv5te/OP_IGET.S */ 2641 /* 2642 * General 32-bit instance field get. 
2643 * 2644 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short 2645 */ 2646 /* op vA, vB, field@CCCC */ 2647 mov r0, rINST, lsr #12 @ r0<- B 2648 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2649 FETCH(r1, 1) @ r1<- field ref CCCC 2650 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2651 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2652 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2653 cmp r0, #0 @ is resolved entry null? 2654 bne .LOP_IGET_SHORT_finish @ no, already resolved 26558: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2656 EXPORT_PC() @ resolve() could throw 2657 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2658 bl dvmResolveInstField @ r0<- resolved InstField ptr 2659 cmp r0, #0 2660 bne .LOP_IGET_SHORT_finish 2661 b common_exceptionThrown 2662 2663 2664/* ------------------------------ */ 2665 .balign 64 2666.L_OP_IPUT: /* 0x59 */ 2667/* File: armv5te/OP_IPUT.S */ 2668 /* 2669 * General 32-bit instance field put. 2670 * 2671 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2672 */ 2673 /* op vA, vB, field@CCCC */ 2674 mov r0, rINST, lsr #12 @ r0<- B 2675 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2676 FETCH(r1, 1) @ r1<- field ref CCCC 2677 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2678 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2679 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2680 cmp r0, #0 @ is resolved entry null? 2681 bne .LOP_IPUT_finish @ no, already resolved 26828: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2683 EXPORT_PC() @ resolve() could throw 2684 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2685 bl dvmResolveInstField @ r0<- resolved InstField ptr 2686 cmp r0, #0 @ success? 2687 bne .LOP_IPUT_finish @ yes, finish up 2688 b common_exceptionThrown 2689 2690/* ------------------------------ */ 2691 .balign 64 2692.L_OP_IPUT_WIDE: /* 0x5a */ 2693/* File: armv4t/OP_IPUT_WIDE.S */ 2694 /* iput-wide vA, vB, field@CCCC */ 2695 mov r0, rINST, lsr #12 @ r0<- B 2696 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2697 FETCH(r1, 1) @ r1<- field ref CCCC 2698 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2699 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2700 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2701 cmp r0, #0 @ is resolved entry null? 2702 bne .LOP_IPUT_WIDE_finish @ no, already resolved 27038: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2704 EXPORT_PC() @ resolve() could throw 2705 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2706 bl dvmResolveInstField @ r0<- resolved InstField ptr 2707 cmp r0, #0 @ success? 2708 bne .LOP_IPUT_WIDE_finish @ yes, finish up 2709 b common_exceptionThrown 2710 2711/* ------------------------------ */ 2712 .balign 64 2713.L_OP_IPUT_OBJECT: /* 0x5b */ 2714/* File: armv5te/OP_IPUT_OBJECT.S */ 2715/* File: armv5te/OP_IPUT.S */ 2716 /* 2717 * General 32-bit instance field put. 2718 * 2719 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2720 */ 2721 /* op vA, vB, field@CCCC */ 2722 mov r0, rINST, lsr #12 @ r0<- B 2723 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2724 FETCH(r1, 1) @ r1<- field ref CCCC 2725 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2726 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2727 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2728 cmp r0, #0 @ is resolved entry null? 
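    @ Sketch of what the test above decides: r0 holds the cached entry from
    @ pDvmDex->pResFields[CCCC].  Non-null means the InstField was already
    @ resolved, so we go straight to the store at .LOP_IPUT_OBJECT_finish;
    @ null falls through to the resolve code at "8:", which may throw.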
2729 bne .LOP_IPUT_OBJECT_finish @ no, already resolved 27308: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2731 EXPORT_PC() @ resolve() could throw 2732 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2733 bl dvmResolveInstField @ r0<- resolved InstField ptr 2734 cmp r0, #0 @ success? 2735 bne .LOP_IPUT_OBJECT_finish @ yes, finish up 2736 b common_exceptionThrown 2737 2738 2739/* ------------------------------ */ 2740 .balign 64 2741.L_OP_IPUT_BOOLEAN: /* 0x5c */ 2742/* File: armv5te/OP_IPUT_BOOLEAN.S */ 2743@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" } 2744/* File: armv5te/OP_IPUT.S */ 2745 /* 2746 * General 32-bit instance field put. 2747 * 2748 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2749 */ 2750 /* op vA, vB, field@CCCC */ 2751 mov r0, rINST, lsr #12 @ r0<- B 2752 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2753 FETCH(r1, 1) @ r1<- field ref CCCC 2754 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2755 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2756 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2757 cmp r0, #0 @ is resolved entry null? 2758 bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved 27598: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2760 EXPORT_PC() @ resolve() could throw 2761 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2762 bl dvmResolveInstField @ r0<- resolved InstField ptr 2763 cmp r0, #0 @ success? 2764 bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up 2765 b common_exceptionThrown 2766 2767 2768/* ------------------------------ */ 2769 .balign 64 2770.L_OP_IPUT_BYTE: /* 0x5d */ 2771/* File: armv5te/OP_IPUT_BYTE.S */ 2772@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" } 2773/* File: armv5te/OP_IPUT.S */ 2774 /* 2775 * General 32-bit instance field put. 2776 * 2777 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2778 */ 2779 /* op vA, vB, field@CCCC */ 2780 mov r0, rINST, lsr #12 @ r0<- B 2781 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2782 FETCH(r1, 1) @ r1<- field ref CCCC 2783 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2784 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2785 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2786 cmp r0, #0 @ is resolved entry null? 2787 bne .LOP_IPUT_BYTE_finish @ no, already resolved 27888: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2789 EXPORT_PC() @ resolve() could throw 2790 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2791 bl dvmResolveInstField @ r0<- resolved InstField ptr 2792 cmp r0, #0 @ success? 2793 bne .LOP_IPUT_BYTE_finish @ yes, finish up 2794 b common_exceptionThrown 2795 2796 2797/* ------------------------------ */ 2798 .balign 64 2799.L_OP_IPUT_CHAR: /* 0x5e */ 2800/* File: armv5te/OP_IPUT_CHAR.S */ 2801@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" } 2802/* File: armv5te/OP_IPUT.S */ 2803 /* 2804 * General 32-bit instance field put. 2805 * 2806 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2807 */ 2808 /* op vA, vB, field@CCCC */ 2809 mov r0, rINST, lsr #12 @ r0<- B 2810 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2811 FETCH(r1, 1) @ r1<- field ref CCCC 2812 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2813 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2814 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2815 cmp r0, #0 @ is resolved entry null? 
2816 bne .LOP_IPUT_CHAR_finish @ no, already resolved 28178: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2818 EXPORT_PC() @ resolve() could throw 2819 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2820 bl dvmResolveInstField @ r0<- resolved InstField ptr 2821 cmp r0, #0 @ success? 2822 bne .LOP_IPUT_CHAR_finish @ yes, finish up 2823 b common_exceptionThrown 2824 2825 2826/* ------------------------------ */ 2827 .balign 64 2828.L_OP_IPUT_SHORT: /* 0x5f */ 2829/* File: armv5te/OP_IPUT_SHORT.S */ 2830@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" } 2831/* File: armv5te/OP_IPUT.S */ 2832 /* 2833 * General 32-bit instance field put. 2834 * 2835 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2836 */ 2837 /* op vA, vB, field@CCCC */ 2838 mov r0, rINST, lsr #12 @ r0<- B 2839 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2840 FETCH(r1, 1) @ r1<- field ref CCCC 2841 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2842 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2843 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2844 cmp r0, #0 @ is resolved entry null? 2845 bne .LOP_IPUT_SHORT_finish @ no, already resolved 28468: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2847 EXPORT_PC() @ resolve() could throw 2848 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2849 bl dvmResolveInstField @ r0<- resolved InstField ptr 2850 cmp r0, #0 @ success? 2851 bne .LOP_IPUT_SHORT_finish @ yes, finish up 2852 b common_exceptionThrown 2853 2854 2855/* ------------------------------ */ 2856 .balign 64 2857.L_OP_SGET: /* 0x60 */ 2858/* File: armv5te/OP_SGET.S */ 2859 /* 2860 * General 32-bit SGET handler. 2861 * 2862 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2863 */ 2864 /* op vAA, field@BBBB */ 2865 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2866 FETCH(r1, 1) @ r1<- field ref BBBB 2867 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2868 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2869 cmp r0, #0 @ is resolved entry null? 2870 beq .LOP_SGET_resolve @ yes, do resolve 2871.LOP_SGET_finish: @ field ptr in r0 2872 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2873 mov r2, rINST, lsr #8 @ r2<- AA 2874 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2875 SET_VREG(r1, r2) @ fp[AA]<- r1 2876 GET_INST_OPCODE(ip) @ extract opcode from rINST 2877 GOTO_OPCODE(ip) @ jump to next instruction 2878 2879/* ------------------------------ */ 2880 .balign 64 2881.L_OP_SGET_WIDE: /* 0x61 */ 2882/* File: armv4t/OP_SGET_WIDE.S */ 2883 /* 2884 * 64-bit SGET handler. 2885 */ 2886 /* sget-wide vAA, field@BBBB */ 2887 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2888 FETCH(r1, 1) @ r1<- field ref BBBB 2889 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2890 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2891 cmp r0, #0 @ is resolved entry null? 
2892 beq .LOP_SGET_WIDE_resolve @ yes, do resolve 2893.LOP_SGET_WIDE_finish: 2894 mov r1, rINST, lsr #8 @ r1<- AA 2895 add r0, r0, #offStaticField_value 2896 ldmia r0, {r2-r3} @ r2/r3<- field value (aligned) 2897 add r1, rFP, r1, lsl #2 @ r1<- &fp[AA] 2898 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2899 stmia r1, {r2-r3} @ vAA/vAA+1<- r2/r3 2900 GET_INST_OPCODE(ip) @ extract opcode from rINST 2901 GOTO_OPCODE(ip) @ jump to next instruction 2902 2903/* ------------------------------ */ 2904 .balign 64 2905.L_OP_SGET_OBJECT: /* 0x62 */ 2906/* File: armv5te/OP_SGET_OBJECT.S */ 2907/* File: armv5te/OP_SGET.S */ 2908 /* 2909 * General 32-bit SGET handler. 2910 * 2911 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2912 */ 2913 /* op vAA, field@BBBB */ 2914 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2915 FETCH(r1, 1) @ r1<- field ref BBBB 2916 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2917 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2918 cmp r0, #0 @ is resolved entry null? 2919 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve 2920.LOP_SGET_OBJECT_finish: @ field ptr in r0 2921 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2922 mov r2, rINST, lsr #8 @ r2<- AA 2923 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2924 SET_VREG(r1, r2) @ fp[AA]<- r1 2925 GET_INST_OPCODE(ip) @ extract opcode from rINST 2926 GOTO_OPCODE(ip) @ jump to next instruction 2927 2928 2929/* ------------------------------ */ 2930 .balign 64 2931.L_OP_SGET_BOOLEAN: /* 0x63 */ 2932/* File: armv5te/OP_SGET_BOOLEAN.S */ 2933/* File: armv5te/OP_SGET.S */ 2934 /* 2935 * General 32-bit SGET handler. 2936 * 2937 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2938 */ 2939 /* op vAA, field@BBBB */ 2940 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2941 FETCH(r1, 1) @ r1<- field ref BBBB 2942 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2943 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2944 cmp r0, #0 @ is resolved entry null? 2945 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve 2946.LOP_SGET_BOOLEAN_finish: @ field ptr in r0 2947 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2948 mov r2, rINST, lsr #8 @ r2<- AA 2949 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2950 SET_VREG(r1, r2) @ fp[AA]<- r1 2951 GET_INST_OPCODE(ip) @ extract opcode from rINST 2952 GOTO_OPCODE(ip) @ jump to next instruction 2953 2954 2955/* ------------------------------ */ 2956 .balign 64 2957.L_OP_SGET_BYTE: /* 0x64 */ 2958/* File: armv5te/OP_SGET_BYTE.S */ 2959/* File: armv5te/OP_SGET.S */ 2960 /* 2961 * General 32-bit SGET handler. 2962 * 2963 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2964 */ 2965 /* op vAA, field@BBBB */ 2966 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2967 FETCH(r1, 1) @ r1<- field ref BBBB 2968 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2969 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2970 cmp r0, #0 @ is resolved entry null? 
2971 beq .LOP_SGET_BYTE_resolve @ yes, do resolve 2972.LOP_SGET_BYTE_finish: @ field ptr in r0 2973 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2974 mov r2, rINST, lsr #8 @ r2<- AA 2975 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2976 SET_VREG(r1, r2) @ fp[AA]<- r1 2977 GET_INST_OPCODE(ip) @ extract opcode from rINST 2978 GOTO_OPCODE(ip) @ jump to next instruction 2979 2980 2981/* ------------------------------ */ 2982 .balign 64 2983.L_OP_SGET_CHAR: /* 0x65 */ 2984/* File: armv5te/OP_SGET_CHAR.S */ 2985/* File: armv5te/OP_SGET.S */ 2986 /* 2987 * General 32-bit SGET handler. 2988 * 2989 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2990 */ 2991 /* op vAA, field@BBBB */ 2992 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2993 FETCH(r1, 1) @ r1<- field ref BBBB 2994 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2995 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2996 cmp r0, #0 @ is resolved entry null? 2997 beq .LOP_SGET_CHAR_resolve @ yes, do resolve 2998.LOP_SGET_CHAR_finish: @ field ptr in r0 2999 ldr r1, [r0, #offStaticField_value] @ r1<- field value 3000 mov r2, rINST, lsr #8 @ r2<- AA 3001 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3002 SET_VREG(r1, r2) @ fp[AA]<- r1 3003 GET_INST_OPCODE(ip) @ extract opcode from rINST 3004 GOTO_OPCODE(ip) @ jump to next instruction 3005 3006 3007/* ------------------------------ */ 3008 .balign 64 3009.L_OP_SGET_SHORT: /* 0x66 */ 3010/* File: armv5te/OP_SGET_SHORT.S */ 3011/* File: armv5te/OP_SGET.S */ 3012 /* 3013 * General 32-bit SGET handler. 3014 * 3015 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 3016 */ 3017 /* op vAA, field@BBBB */ 3018 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3019 FETCH(r1, 1) @ r1<- field ref BBBB 3020 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3021 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3022 cmp r0, #0 @ is resolved entry null? 3023 beq .LOP_SGET_SHORT_resolve @ yes, do resolve 3024.LOP_SGET_SHORT_finish: @ field ptr in r0 3025 ldr r1, [r0, #offStaticField_value] @ r1<- field value 3026 mov r2, rINST, lsr #8 @ r2<- AA 3027 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3028 SET_VREG(r1, r2) @ fp[AA]<- r1 3029 GET_INST_OPCODE(ip) @ extract opcode from rINST 3030 GOTO_OPCODE(ip) @ jump to next instruction 3031 3032 3033/* ------------------------------ */ 3034 .balign 64 3035.L_OP_SPUT: /* 0x67 */ 3036/* File: armv5te/OP_SPUT.S */ 3037 /* 3038 * General 32-bit SPUT handler. 3039 * 3040 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3041 */ 3042 /* op vAA, field@BBBB */ 3043 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3044 FETCH(r1, 1) @ r1<- field ref BBBB 3045 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3046 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3047 cmp r0, #0 @ is resolved entry null? 3048 beq .LOP_SPUT_resolve @ yes, do resolve 3049.LOP_SPUT_finish: @ field ptr in r0 3050 mov r2, rINST, lsr #8 @ r2<- AA 3051 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3052 GET_VREG(r1, r2) @ r1<- fp[AA] 3053 GET_INST_OPCODE(ip) @ extract opcode from rINST 3054 str r1, [r0, #offStaticField_value] @ field<- vAA 3055 GOTO_OPCODE(ip) @ jump to next instruction 3056 3057/* ------------------------------ */ 3058 .balign 64 3059.L_OP_SPUT_WIDE: /* 0x68 */ 3060/* File: armv4t/OP_SPUT_WIDE.S */ 3061 /* 3062 * 64-bit SPUT handler. 
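     *
     * Roughly, in C (a sketch; resolving the StaticField, when the cached
     * entry is null, happens in the _resolve fragment and may throw):
     *
     *     StaticField* f = dvmDex->pResFields[BBBB];
     *     if (f == NULL) goto resolve;
     *     memcpy(&f->value, &fp[AA], 8);          // 64-bit aligned copy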
3063 */ 3064 /* sput-wide vAA, field@BBBB */ 3065 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3066 FETCH(r1, 1) @ r1<- field ref BBBB 3067 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3068 mov r9, rINST, lsr #8 @ r9<- AA 3069 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3070 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 3071 cmp r0, #0 @ is resolved entry null? 3072 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve 3073.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9 3074 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3075 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 3076 GET_INST_OPCODE(ip) @ extract opcode from rINST 3077 add r0, r0, #offStaticField_value 3078 stmia r0, {r2-r3} @ field<- vAA/vAA+1 3079 GOTO_OPCODE(ip) @ jump to next instruction 3080 3081/* ------------------------------ */ 3082 .balign 64 3083.L_OP_SPUT_OBJECT: /* 0x69 */ 3084/* File: armv5te/OP_SPUT_OBJECT.S */ 3085/* File: armv5te/OP_SPUT.S */ 3086 /* 3087 * General 32-bit SPUT handler. 3088 * 3089 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3090 */ 3091 /* op vAA, field@BBBB */ 3092 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3093 FETCH(r1, 1) @ r1<- field ref BBBB 3094 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3095 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3096 cmp r0, #0 @ is resolved entry null? 3097 beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve 3098.LOP_SPUT_OBJECT_finish: @ field ptr in r0 3099 mov r2, rINST, lsr #8 @ r2<- AA 3100 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3101 GET_VREG(r1, r2) @ r1<- fp[AA] 3102 GET_INST_OPCODE(ip) @ extract opcode from rINST 3103 str r1, [r0, #offStaticField_value] @ field<- vAA 3104 GOTO_OPCODE(ip) @ jump to next instruction 3105 3106 3107/* ------------------------------ */ 3108 .balign 64 3109.L_OP_SPUT_BOOLEAN: /* 0x6a */ 3110/* File: armv5te/OP_SPUT_BOOLEAN.S */ 3111/* File: armv5te/OP_SPUT.S */ 3112 /* 3113 * General 32-bit SPUT handler. 3114 * 3115 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3116 */ 3117 /* op vAA, field@BBBB */ 3118 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3119 FETCH(r1, 1) @ r1<- field ref BBBB 3120 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3121 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3122 cmp r0, #0 @ is resolved entry null? 3123 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve 3124.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0 3125 mov r2, rINST, lsr #8 @ r2<- AA 3126 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3127 GET_VREG(r1, r2) @ r1<- fp[AA] 3128 GET_INST_OPCODE(ip) @ extract opcode from rINST 3129 str r1, [r0, #offStaticField_value] @ field<- vAA 3130 GOTO_OPCODE(ip) @ jump to next instruction 3131 3132 3133/* ------------------------------ */ 3134 .balign 64 3135.L_OP_SPUT_BYTE: /* 0x6b */ 3136/* File: armv5te/OP_SPUT_BYTE.S */ 3137/* File: armv5te/OP_SPUT.S */ 3138 /* 3139 * General 32-bit SPUT handler. 3140 * 3141 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3142 */ 3143 /* op vAA, field@BBBB */ 3144 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3145 FETCH(r1, 1) @ r1<- field ref BBBB 3146 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3147 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3148 cmp r0, #0 @ is resolved entry null? 
3149 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve 3150.LOP_SPUT_BYTE_finish: @ field ptr in r0 3151 mov r2, rINST, lsr #8 @ r2<- AA 3152 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3153 GET_VREG(r1, r2) @ r1<- fp[AA] 3154 GET_INST_OPCODE(ip) @ extract opcode from rINST 3155 str r1, [r0, #offStaticField_value] @ field<- vAA 3156 GOTO_OPCODE(ip) @ jump to next instruction 3157 3158 3159/* ------------------------------ */ 3160 .balign 64 3161.L_OP_SPUT_CHAR: /* 0x6c */ 3162/* File: armv5te/OP_SPUT_CHAR.S */ 3163/* File: armv5te/OP_SPUT.S */ 3164 /* 3165 * General 32-bit SPUT handler. 3166 * 3167 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3168 */ 3169 /* op vAA, field@BBBB */ 3170 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3171 FETCH(r1, 1) @ r1<- field ref BBBB 3172 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3173 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3174 cmp r0, #0 @ is resolved entry null? 3175 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve 3176.LOP_SPUT_CHAR_finish: @ field ptr in r0 3177 mov r2, rINST, lsr #8 @ r2<- AA 3178 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3179 GET_VREG(r1, r2) @ r1<- fp[AA] 3180 GET_INST_OPCODE(ip) @ extract opcode from rINST 3181 str r1, [r0, #offStaticField_value] @ field<- vAA 3182 GOTO_OPCODE(ip) @ jump to next instruction 3183 3184 3185/* ------------------------------ */ 3186 .balign 64 3187.L_OP_SPUT_SHORT: /* 0x6d */ 3188/* File: armv5te/OP_SPUT_SHORT.S */ 3189/* File: armv5te/OP_SPUT.S */ 3190 /* 3191 * General 32-bit SPUT handler. 3192 * 3193 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3194 */ 3195 /* op vAA, field@BBBB */ 3196 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3197 FETCH(r1, 1) @ r1<- field ref BBBB 3198 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3199 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3200 cmp r0, #0 @ is resolved entry null? 3201 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve 3202.LOP_SPUT_SHORT_finish: @ field ptr in r0 3203 mov r2, rINST, lsr #8 @ r2<- AA 3204 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3205 GET_VREG(r1, r2) @ r1<- fp[AA] 3206 GET_INST_OPCODE(ip) @ extract opcode from rINST 3207 str r1, [r0, #offStaticField_value] @ field<- vAA 3208 GOTO_OPCODE(ip) @ jump to next instruction 3209 3210 3211/* ------------------------------ */ 3212 .balign 64 3213.L_OP_INVOKE_VIRTUAL: /* 0x6e */ 3214/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3215 /* 3216 * Handle a virtual method call. 3217 * 3218 * for: invoke-virtual, invoke-virtual/range 3219 */ 3220 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3221 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3222 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3223 FETCH(r1, 1) @ r1<- BBBB 3224 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3225 FETCH(r10, 2) @ r10<- GFED or CCCC 3226 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3227 .if (!0) 3228 and r10, r10, #15 @ r10<- D (or stays CCCC) 3229 .endif 3230 cmp r0, #0 @ already resolved? 3231 EXPORT_PC() @ must export for invoke 3232 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on 3233 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3234 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3235 mov r2, #METHOD_VIRTUAL @ resolver method type 3236 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3237 cmp r0, #0 @ got null? 
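    @ (a NULL result here means dvmResolveMethod failed and left an
    @ exception pending; the fall-through below rethrows it)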
3238 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue
3239 b common_exceptionThrown @ yes, handle exception
3240
3241/* ------------------------------ */
3242 .balign 64
3243.L_OP_INVOKE_SUPER: /* 0x6f */
3244/* File: armv5te/OP_INVOKE_SUPER.S */
3245 /*
3246 * Handle a "super" method call.
3247 *
3248 * for: invoke-super, invoke-super/range
3249 */
3250 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3251 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3252 FETCH(r10, 2) @ r10<- GFED or CCCC
3253 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3254 .if (!0)
3255 and r10, r10, #15 @ r10<- D (or stays CCCC)
3256 .endif
3257 FETCH(r1, 1) @ r1<- BBBB
3258 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3259 GET_VREG(r2, r10) @ r2<- "this" ptr
3260 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
3261 cmp r2, #0 @ null "this"?
3262 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method
3263 beq common_errNullObject @ null "this", throw exception
3264 cmp r0, #0 @ already resolved?
3265 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz
3266 EXPORT_PC() @ must export for invoke
3267 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on
3268 b .LOP_INVOKE_SUPER_resolve @ do resolve now
3269
3270/* ------------------------------ */
3271 .balign 64
3272.L_OP_INVOKE_DIRECT: /* 0x70 */
3273/* File: armv5te/OP_INVOKE_DIRECT.S */
3274 /*
3275 * Handle a direct method call.
3276 *
3277 * (We could defer the "is 'this' pointer null" test to the common
3278 * method invocation code, and use a flag to indicate that static
3279 * calls don't count. If we do this as part of copying the arguments
3280 * out we could avoid loading the first arg twice.)
3281 *
3282 * for: invoke-direct, invoke-direct/range
3283 */
3284 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3285 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3286 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3287 FETCH(r1, 1) @ r1<- BBBB
3288 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3289 FETCH(r10, 2) @ r10<- GFED or CCCC
3290 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
3291 .if (!0)
3292 and r10, r10, #15 @ r10<- D (or stays CCCC)
3293 .endif
3294 cmp r0, #0 @ already resolved?
3295 EXPORT_PC() @ must export for invoke
3296 GET_VREG(r2, r10) @ r2<- "this" ptr
3297 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now
3298.LOP_INVOKE_DIRECT_finish:
3299 cmp r2, #0 @ null "this" ref?
3300 bne common_invokeMethodNoRange @ no, continue on
3301 b common_errNullObject @ yes, throw exception
3302
3303/* ------------------------------ */
3304 .balign 64
3305.L_OP_INVOKE_STATIC: /* 0x71 */
3306/* File: armv5te/OP_INVOKE_STATIC.S */
3307 /*
3308 * Handle a static method call.
3309 *
3310 * for: invoke-static, invoke-static/range
3311 */
3312 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3313 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3314 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3315 FETCH(r1, 1) @ r1<- BBBB
3316 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3317 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
3318 cmp r0, #0 @ already resolved?
3319 EXPORT_PC() @ must export for invoke
3320 bne common_invokeMethodNoRange @ yes, continue on
33210: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
3322 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
3323 mov r2, #METHOD_STATIC @ resolver method type
3324 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
3325 cmp r0, #0 @ got null?
3326 bne common_invokeMethodNoRange @ no, continue 3327 b common_exceptionThrown @ yes, handle exception 3328 3329 3330/* ------------------------------ */ 3331 .balign 64 3332.L_OP_INVOKE_INTERFACE: /* 0x72 */ 3333/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3334 /* 3335 * Handle an interface method call. 3336 * 3337 * for: invoke-interface, invoke-interface/range 3338 */ 3339 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3340 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3341 FETCH(r2, 2) @ r2<- FEDC or CCCC 3342 FETCH(r1, 1) @ r1<- BBBB 3343 .if (!0) 3344 and r2, r2, #15 @ r2<- C (or stays CCCC) 3345 .endif 3346 EXPORT_PC() @ must export for invoke 3347 GET_VREG(r0, r2) @ r0<- first arg ("this") 3348 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3349 cmp r0, #0 @ null obj? 3350 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3351 beq common_errNullObject @ yes, fail 3352 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3353 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3354 cmp r0, #0 @ failed? 3355 beq common_exceptionThrown @ yes, handle exception 3356 b common_invokeMethodNoRange @ jump to common handler 3357 3358 3359/* ------------------------------ */ 3360 .balign 64 3361.L_OP_UNUSED_73: /* 0x73 */ 3362/* File: armv5te/OP_UNUSED_73.S */ 3363/* File: armv5te/unused.S */ 3364 bl common_abort 3365 3366 3367 3368/* ------------------------------ */ 3369 .balign 64 3370.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */ 3371/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */ 3372/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3373 /* 3374 * Handle a virtual method call. 3375 * 3376 * for: invoke-virtual, invoke-virtual/range 3377 */ 3378 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3379 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3380 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3381 FETCH(r1, 1) @ r1<- BBBB 3382 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3383 FETCH(r10, 2) @ r10<- GFED or CCCC 3384 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3385 .if (!1) 3386 and r10, r10, #15 @ r10<- D (or stays CCCC) 3387 .endif 3388 cmp r0, #0 @ already resolved? 3389 EXPORT_PC() @ must export for invoke 3390 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on 3391 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3392 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3393 mov r2, #METHOD_VIRTUAL @ resolver method type 3394 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3395 cmp r0, #0 @ got null? 3396 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue 3397 b common_exceptionThrown @ yes, handle exception 3398 3399 3400/* ------------------------------ */ 3401 .balign 64 3402.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */ 3403/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */ 3404/* File: armv5te/OP_INVOKE_SUPER.S */ 3405 /* 3406 * Handle a "super" method call. 3407 * 3408 * for: invoke-super, invoke-super/range 3409 */ 3410 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3411 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3412 FETCH(r10, 2) @ r10<- GFED or CCCC 3413 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3414 .if (!1) 3415 and r10, r10, #15 @ r10<- D (or stays CCCC) 3416 .endif 3417 FETCH(r1, 1) @ r1<- BBBB 3418 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3419 GET_VREG(r2, r10) @ r2<- "this" ptr 3420 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3421 cmp r2, #0 @ null "this"? 
3422 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method
3423 beq common_errNullObject @ null "this", throw exception
3424 cmp r0, #0 @ already resolved?
3425 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz
3426 EXPORT_PC() @ must export for invoke
3427 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on
3428 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now
3429
3430
3431/* ------------------------------ */
3432 .balign 64
3433.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
3434/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
3435/* File: armv5te/OP_INVOKE_DIRECT.S */
3436 /*
3437 * Handle a direct method call.
3438 *
3439 * (We could defer the "is 'this' pointer null" test to the common
3440 * method invocation code, and use a flag to indicate that static
3441 * calls don't count. If we do this as part of copying the arguments
3442 * out we could avoid loading the first arg twice.)
3443 *
3444 * for: invoke-direct, invoke-direct/range
3445 */
3446 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3447 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3448 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3449 FETCH(r1, 1) @ r1<- BBBB
3450 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3451 FETCH(r10, 2) @ r10<- GFED or CCCC
3452 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
3453 .if (!1)
3454 and r10, r10, #15 @ r10<- D (or stays CCCC)
3455 .endif
3456 cmp r0, #0 @ already resolved?
3457 EXPORT_PC() @ must export for invoke
3458 GET_VREG(r2, r10) @ r2<- "this" ptr
3459 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now
3460.LOP_INVOKE_DIRECT_RANGE_finish:
3461 cmp r2, #0 @ null "this" ref?
3462 bne common_invokeMethodRange @ no, continue on
3463 b common_errNullObject @ yes, throw exception
3464
3465
3466/* ------------------------------ */
3467 .balign 64
3468.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
3469/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
3470/* File: armv5te/OP_INVOKE_STATIC.S */
3471 /*
3472 * Handle a static method call.
3473 *
3474 * for: invoke-static, invoke-static/range
3475 */
3476 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
3477 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
3478 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex
3479 FETCH(r1, 1) @ r1<- BBBB
3480 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
3481 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
3482 cmp r0, #0 @ already resolved?
3483 EXPORT_PC() @ must export for invoke
3484 bne common_invokeMethodRange @ yes, continue on
34850: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method
3486 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
3487 mov r2, #METHOD_STATIC @ resolver method type
3488 bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
3489 cmp r0, #0 @ got null?
3490 bne common_invokeMethodRange @ no, continue
3491 b common_exceptionThrown @ yes, handle exception
3492
3493
3494
3495/* ------------------------------ */
3496 .balign 64
3497.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
3498/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
3499/* File: armv5te/OP_INVOKE_INTERFACE.S */
3500 /*
3501 * Handle an interface method call.
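 *
 * In rough C terms (a sketch only; the argument order matches the
 * dvmFindInterfaceMethodInCache call below):
 *
 *     Method* m = dvmFindInterfaceMethodInCache(thisPtr->clazz, BBBB,
 *                                               curMethod, methodClassDex);
 *     if (m == NULL) goto exceptionThrown;    // lookup failed
 *     goto common_invokeMethodRange;          // shared invoke path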
3502 * 3503 * for: invoke-interface, invoke-interface/range 3504 */ 3505 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3506 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3507 FETCH(r2, 2) @ r2<- FEDC or CCCC 3508 FETCH(r1, 1) @ r1<- BBBB 3509 .if (!1) 3510 and r2, r2, #15 @ r2<- C (or stays CCCC) 3511 .endif 3512 EXPORT_PC() @ must export for invoke 3513 GET_VREG(r0, r2) @ r0<- first arg ("this") 3514 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3515 cmp r0, #0 @ null obj? 3516 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3517 beq common_errNullObject @ yes, fail 3518 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3519 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3520 cmp r0, #0 @ failed? 3521 beq common_exceptionThrown @ yes, handle exception 3522 b common_invokeMethodRange @ jump to common handler 3523 3524 3525 3526/* ------------------------------ */ 3527 .balign 64 3528.L_OP_UNUSED_79: /* 0x79 */ 3529/* File: armv5te/OP_UNUSED_79.S */ 3530/* File: armv5te/unused.S */ 3531 bl common_abort 3532 3533 3534 3535/* ------------------------------ */ 3536 .balign 64 3537.L_OP_UNUSED_7A: /* 0x7a */ 3538/* File: armv5te/OP_UNUSED_7A.S */ 3539/* File: armv5te/unused.S */ 3540 bl common_abort 3541 3542 3543 3544/* ------------------------------ */ 3545 .balign 64 3546.L_OP_NEG_INT: /* 0x7b */ 3547/* File: armv5te/OP_NEG_INT.S */ 3548/* File: armv5te/unop.S */ 3549 /* 3550 * Generic 32-bit unary operation. Provide an "instr" line that 3551 * specifies an instruction that performs "result = op r0". 3552 * This could be an ARM instruction or a function call. 3553 * 3554 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3555 * int-to-byte, int-to-char, int-to-short 3556 */ 3557 /* unop vA, vB */ 3558 mov r3, rINST, lsr #12 @ r3<- B 3559 mov r9, rINST, lsr #8 @ r9<- A+ 3560 GET_VREG(r0, r3) @ r0<- vB 3561 and r9, r9, #15 3562 @ optional op; may set condition codes 3563 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3564 rsb r0, r0, #0 @ r0<- op, r0-r3 changed 3565 GET_INST_OPCODE(ip) @ extract opcode from rINST 3566 SET_VREG(r0, r9) @ vAA<- r0 3567 GOTO_OPCODE(ip) @ jump to next instruction 3568 /* 9-10 instructions */ 3569 3570 3571/* ------------------------------ */ 3572 .balign 64 3573.L_OP_NOT_INT: /* 0x7c */ 3574/* File: armv5te/OP_NOT_INT.S */ 3575/* File: armv5te/unop.S */ 3576 /* 3577 * Generic 32-bit unary operation. Provide an "instr" line that 3578 * specifies an instruction that performs "result = op r0". 3579 * This could be an ARM instruction or a function call. 3580 * 3581 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3582 * int-to-byte, int-to-char, int-to-short 3583 */ 3584 /* unop vA, vB */ 3585 mov r3, rINST, lsr #12 @ r3<- B 3586 mov r9, rINST, lsr #8 @ r9<- A+ 3587 GET_VREG(r0, r3) @ r0<- vB 3588 and r9, r9, #15 3589 @ optional op; may set condition codes 3590 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3591 mvn r0, r0 @ r0<- op, r0-r3 changed 3592 GET_INST_OPCODE(ip) @ extract opcode from rINST 3593 SET_VREG(r0, r9) @ vAA<- r0 3594 GOTO_OPCODE(ip) @ jump to next instruction 3595 /* 9-10 instructions */ 3596 3597 3598/* ------------------------------ */ 3599 .balign 64 3600.L_OP_NEG_LONG: /* 0x7d */ 3601/* File: armv5te/OP_NEG_LONG.S */ 3602/* File: armv5te/unopWide.S */ 3603 /* 3604 * Generic 64-bit unary operation. Provide an "instr" line that 3605 * specifies an instruction that performs "result = op r0/r1". 3606 * This could be an ARM instruction or a function call. 
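 *
 * For the neg-long instance below, the "instr" pair is rsbs/rsc: the low
 * word is computed as 0 - lo (setting the carry/borrow flag) and the high
 * word as 0 - hi - borrow, i.e. a full 64-bit two's-complement negate,
 * equivalent to "result = -(long long)vB" in C.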
3607 * 3608 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3609 */ 3610 /* unop vA, vB */ 3611 mov r9, rINST, lsr #8 @ r9<- A+ 3612 mov r3, rINST, lsr #12 @ r3<- B 3613 and r9, r9, #15 3614 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3615 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3616 ldmia r3, {r0-r1} @ r0/r1<- vAA 3617 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3618 rsbs r0, r0, #0 @ optional op; may set condition codes 3619 rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed 3620 GET_INST_OPCODE(ip) @ extract opcode from rINST 3621 stmia r9, {r0-r1} @ vAA<- r0/r1 3622 GOTO_OPCODE(ip) @ jump to next instruction 3623 /* 12-13 instructions */ 3624 3625 3626 3627/* ------------------------------ */ 3628 .balign 64 3629.L_OP_NOT_LONG: /* 0x7e */ 3630/* File: armv5te/OP_NOT_LONG.S */ 3631/* File: armv5te/unopWide.S */ 3632 /* 3633 * Generic 64-bit unary operation. Provide an "instr" line that 3634 * specifies an instruction that performs "result = op r0/r1". 3635 * This could be an ARM instruction or a function call. 3636 * 3637 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3638 */ 3639 /* unop vA, vB */ 3640 mov r9, rINST, lsr #8 @ r9<- A+ 3641 mov r3, rINST, lsr #12 @ r3<- B 3642 and r9, r9, #15 3643 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3644 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3645 ldmia r3, {r0-r1} @ r0/r1<- vAA 3646 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3647 mvn r0, r0 @ optional op; may set condition codes 3648 mvn r1, r1 @ r0/r1<- op, r2-r3 changed 3649 GET_INST_OPCODE(ip) @ extract opcode from rINST 3650 stmia r9, {r0-r1} @ vAA<- r0/r1 3651 GOTO_OPCODE(ip) @ jump to next instruction 3652 /* 12-13 instructions */ 3653 3654 3655 3656/* ------------------------------ */ 3657 .balign 64 3658.L_OP_NEG_FLOAT: /* 0x7f */ 3659/* File: armv5te/OP_NEG_FLOAT.S */ 3660/* File: armv5te/unop.S */ 3661 /* 3662 * Generic 32-bit unary operation. Provide an "instr" line that 3663 * specifies an instruction that performs "result = op r0". 3664 * This could be an ARM instruction or a function call. 3665 * 3666 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3667 * int-to-byte, int-to-char, int-to-short 3668 */ 3669 /* unop vA, vB */ 3670 mov r3, rINST, lsr #12 @ r3<- B 3671 mov r9, rINST, lsr #8 @ r9<- A+ 3672 GET_VREG(r0, r3) @ r0<- vB 3673 and r9, r9, #15 3674 @ optional op; may set condition codes 3675 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3676 add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed 3677 GET_INST_OPCODE(ip) @ extract opcode from rINST 3678 SET_VREG(r0, r9) @ vAA<- r0 3679 GOTO_OPCODE(ip) @ jump to next instruction 3680 /* 9-10 instructions */ 3681 3682 3683/* ------------------------------ */ 3684 .balign 64 3685.L_OP_NEG_DOUBLE: /* 0x80 */ 3686/* File: armv5te/OP_NEG_DOUBLE.S */ 3687/* File: armv5te/unopWide.S */ 3688 /* 3689 * Generic 64-bit unary operation. Provide an "instr" line that 3690 * specifies an instruction that performs "result = op r0/r1". 3691 * This could be an ARM instruction or a function call. 
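 *
 * neg-float above and neg-double below simply flip the IEEE-754 sign bit:
 * adding 0x80000000 to a 32-bit word toggles bit 31 (the carry out is
 * discarded), so the effect is the same as "bits ^ 0x80000000" on the
 * value (on the high word in the double case).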
3692 * 3693 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3694 */ 3695 /* unop vA, vB */ 3696 mov r9, rINST, lsr #8 @ r9<- A+ 3697 mov r3, rINST, lsr #12 @ r3<- B 3698 and r9, r9, #15 3699 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3700 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3701 ldmia r3, {r0-r1} @ r0/r1<- vAA 3702 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3703 @ optional op; may set condition codes 3704 add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed 3705 GET_INST_OPCODE(ip) @ extract opcode from rINST 3706 stmia r9, {r0-r1} @ vAA<- r0/r1 3707 GOTO_OPCODE(ip) @ jump to next instruction 3708 /* 12-13 instructions */ 3709 3710 3711 3712/* ------------------------------ */ 3713 .balign 64 3714.L_OP_INT_TO_LONG: /* 0x81 */ 3715/* File: armv5te/OP_INT_TO_LONG.S */ 3716/* File: armv5te/unopWider.S */ 3717 /* 3718 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3719 * that specifies an instruction that performs "result = op r0", where 3720 * "result" is a 64-bit quantity in r0/r1. 3721 * 3722 * For: int-to-long, int-to-double, float-to-long, float-to-double 3723 */ 3724 /* unop vA, vB */ 3725 mov r9, rINST, lsr #8 @ r9<- A+ 3726 mov r3, rINST, lsr #12 @ r3<- B 3727 and r9, r9, #15 3728 GET_VREG(r0, r3) @ r0<- vB 3729 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3730 @ optional op; may set condition codes 3731 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3732 mov r1, r0, asr #31 @ r0<- op, r0-r3 changed 3733 GET_INST_OPCODE(ip) @ extract opcode from rINST 3734 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3735 GOTO_OPCODE(ip) @ jump to next instruction 3736 /* 10-11 instructions */ 3737 3738 3739/* ------------------------------ */ 3740 .balign 64 3741.L_OP_INT_TO_FLOAT: /* 0x82 */ 3742/* File: armv5te/OP_INT_TO_FLOAT.S */ 3743/* File: armv5te/unop.S */ 3744 /* 3745 * Generic 32-bit unary operation. Provide an "instr" line that 3746 * specifies an instruction that performs "result = op r0". 3747 * This could be an ARM instruction or a function call. 3748 * 3749 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3750 * int-to-byte, int-to-char, int-to-short 3751 */ 3752 /* unop vA, vB */ 3753 mov r3, rINST, lsr #12 @ r3<- B 3754 mov r9, rINST, lsr #8 @ r9<- A+ 3755 GET_VREG(r0, r3) @ r0<- vB 3756 and r9, r9, #15 3757 @ optional op; may set condition codes 3758 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3759 bl __aeabi_i2f @ r0<- op, r0-r3 changed 3760 GET_INST_OPCODE(ip) @ extract opcode from rINST 3761 SET_VREG(r0, r9) @ vAA<- r0 3762 GOTO_OPCODE(ip) @ jump to next instruction 3763 /* 9-10 instructions */ 3764 3765 3766/* ------------------------------ */ 3767 .balign 64 3768.L_OP_INT_TO_DOUBLE: /* 0x83 */ 3769/* File: armv5te/OP_INT_TO_DOUBLE.S */ 3770/* File: armv5te/unopWider.S */ 3771 /* 3772 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3773 * that specifies an instruction that performs "result = op r0", where 3774 * "result" is a 64-bit quantity in r0/r1. 
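 *
 * int-to-long above widens inline: "mov r1, r0, asr #31" copies the sign
 * bit into every bit of the high word (0 or -1), which is exactly
 * "(long long)(int)vB" in C. int-to-float and int-to-double instead call
 * the EABI helpers __aeabi_i2f / __aeabi_i2d.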
3775 * 3776 * For: int-to-long, int-to-double, float-to-long, float-to-double 3777 */ 3778 /* unop vA, vB */ 3779 mov r9, rINST, lsr #8 @ r9<- A+ 3780 mov r3, rINST, lsr #12 @ r3<- B 3781 and r9, r9, #15 3782 GET_VREG(r0, r3) @ r0<- vB 3783 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3784 @ optional op; may set condition codes 3785 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3786 bl __aeabi_i2d @ r0<- op, r0-r3 changed 3787 GET_INST_OPCODE(ip) @ extract opcode from rINST 3788 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3789 GOTO_OPCODE(ip) @ jump to next instruction 3790 /* 10-11 instructions */ 3791 3792 3793/* ------------------------------ */ 3794 .balign 64 3795.L_OP_LONG_TO_INT: /* 0x84 */ 3796/* File: armv5te/OP_LONG_TO_INT.S */ 3797/* we ignore the high word, making this equivalent to a 32-bit reg move */ 3798/* File: armv5te/OP_MOVE.S */ 3799 /* for move, move-object, long-to-int */ 3800 /* op vA, vB */ 3801 mov r1, rINST, lsr #12 @ r1<- B from 15:12 3802 mov r0, rINST, lsr #8 @ r0<- A from 11:8 3803 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3804 GET_VREG(r2, r1) @ r2<- fp[B] 3805 and r0, r0, #15 3806 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 3807 SET_VREG(r2, r0) @ fp[A]<- r2 3808 GOTO_OPCODE(ip) @ execute next instruction 3809 3810 3811 3812/* ------------------------------ */ 3813 .balign 64 3814.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3815/* File: armv5te/OP_LONG_TO_FLOAT.S */ 3816/* File: armv5te/unopNarrower.S */ 3817 /* 3818 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3819 * that specifies an instruction that performs "result = op r0/r1", where 3820 * "result" is a 32-bit quantity in r0. 3821 * 3822 * For: long-to-float, double-to-int, double-to-float 3823 * 3824 * (This would work for long-to-int, but that instruction is actually 3825 * an exact match for OP_MOVE.) 3826 */ 3827 /* unop vA, vB */ 3828 mov r3, rINST, lsr #12 @ r3<- B 3829 mov r9, rINST, lsr #8 @ r9<- A+ 3830 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3831 and r9, r9, #15 3832 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3833 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3834 @ optional op; may set condition codes 3835 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3836 GET_INST_OPCODE(ip) @ extract opcode from rINST 3837 SET_VREG(r0, r9) @ vA<- r0 3838 GOTO_OPCODE(ip) @ jump to next instruction 3839 /* 10-11 instructions */ 3840 3841 3842/* ------------------------------ */ 3843 .balign 64 3844.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3845/* File: armv5te/OP_LONG_TO_DOUBLE.S */ 3846/* File: armv5te/unopWide.S */ 3847 /* 3848 * Generic 64-bit unary operation. Provide an "instr" line that 3849 * specifies an instruction that performs "result = op r0/r1". 3850 * This could be an ARM instruction or a function call. 
3851 * 3852 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3853 */ 3854 /* unop vA, vB */ 3855 mov r9, rINST, lsr #8 @ r9<- A+ 3856 mov r3, rINST, lsr #12 @ r3<- B 3857 and r9, r9, #15 3858 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3859 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3860 ldmia r3, {r0-r1} @ r0/r1<- vAA 3861 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3862 @ optional op; may set condition codes 3863 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3864 GET_INST_OPCODE(ip) @ extract opcode from rINST 3865 stmia r9, {r0-r1} @ vAA<- r0/r1 3866 GOTO_OPCODE(ip) @ jump to next instruction 3867 /* 12-13 instructions */ 3868 3869 3870 3871/* ------------------------------ */ 3872 .balign 64 3873.L_OP_FLOAT_TO_INT: /* 0x87 */ 3874/* File: armv5te/OP_FLOAT_TO_INT.S */ 3875/* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3876/* File: armv5te/unop.S */ 3877 /* 3878 * Generic 32-bit unary operation. Provide an "instr" line that 3879 * specifies an instruction that performs "result = op r0". 3880 * This could be an ARM instruction or a function call. 3881 * 3882 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3883 * int-to-byte, int-to-char, int-to-short 3884 */ 3885 /* unop vA, vB */ 3886 mov r3, rINST, lsr #12 @ r3<- B 3887 mov r9, rINST, lsr #8 @ r9<- A+ 3888 GET_VREG(r0, r3) @ r0<- vB 3889 and r9, r9, #15 3890 @ optional op; may set condition codes 3891 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3892 bl __aeabi_f2iz @ r0<- op, r0-r3 changed 3893 GET_INST_OPCODE(ip) @ extract opcode from rINST 3894 SET_VREG(r0, r9) @ vAA<- r0 3895 GOTO_OPCODE(ip) @ jump to next instruction 3896 /* 9-10 instructions */ 3897 3898 3899#if 0 3900@include "armv5te/unop.S" {"instr":"bl f2i_doconv"} 3901@break 3902/* 3903 * Convert the float in r0 to an int in r0. 3904 * 3905 * We have to clip values to int min/max per the specification. The 3906 * expected common case is a "reasonable" value that converts directly 3907 * to modest integer. The EABI convert function isn't doing this for us. 3908 */ 3909f2i_doconv: 3910 stmfd sp!, {r4, lr} 3911 mov r1, #0x4f000000 @ (float)maxint 3912 mov r4, r0 3913 bl __aeabi_fcmpge @ is arg >= maxint? 3914 cmp r0, #0 @ nonzero == yes 3915 mvnne r0, #0x80000000 @ return maxint (7fffffff) 3916 ldmnefd sp!, {r4, pc} 3917 3918 mov r0, r4 @ recover arg 3919 mov r1, #0xcf000000 @ (float)minint 3920 bl __aeabi_fcmple @ is arg <= minint? 3921 cmp r0, #0 @ nonzero == yes 3922 movne r0, #0x80000000 @ return minint (80000000) 3923 ldmnefd sp!, {r4, pc} 3924 3925 mov r0, r4 @ recover arg 3926 mov r1, r4 3927 bl __aeabi_fcmpeq @ is arg == self? 3928 cmp r0, #0 @ zero == no 3929 ldmeqfd sp!, {r4, pc} @ return zero for NaN 3930 3931 mov r0, r4 @ recover arg 3932 bl __aeabi_f2iz @ convert float to int 3933 ldmfd sp!, {r4, pc} 3934#endif 3935 3936 3937/* ------------------------------ */ 3938 .balign 64 3939.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3940/* File: armv5te/OP_FLOAT_TO_LONG.S */ 3941@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3942/* File: armv5te/unopWider.S */ 3943 /* 3944 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3945 * that specifies an instruction that performs "result = op r0", where 3946 * "result" is a 64-bit quantity in r0/r1. 
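 *
 * This instance calls a local f2l_doconv helper rather than __aeabi_f2lz
 * so that NaN and out-of-range inputs get Java semantics (the #if 0 block
 * above shows the same idea for float-to-int). Roughly, in C:
 *
 *     if (f != f)          return 0;      // NaN
 *     if (f >= (float)MAX) return MAX;    // clamp +inf / too large
 *     if (f <= (float)MIN) return MIN;    // clamp -inf / too small
 *     return (target_type)f;
 *
 * (illustrative sketch only)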
3947 * 3948 * For: int-to-long, int-to-double, float-to-long, float-to-double 3949 */ 3950 /* unop vA, vB */ 3951 mov r9, rINST, lsr #8 @ r9<- A+ 3952 mov r3, rINST, lsr #12 @ r3<- B 3953 and r9, r9, #15 3954 GET_VREG(r0, r3) @ r0<- vB 3955 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3956 @ optional op; may set condition codes 3957 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3958 bl f2l_doconv @ r0<- op, r0-r3 changed 3959 GET_INST_OPCODE(ip) @ extract opcode from rINST 3960 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3961 GOTO_OPCODE(ip) @ jump to next instruction 3962 /* 10-11 instructions */ 3963 3964 3965 3966/* ------------------------------ */ 3967 .balign 64 3968.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3969/* File: armv5te/OP_FLOAT_TO_DOUBLE.S */ 3970/* File: armv5te/unopWider.S */ 3971 /* 3972 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3973 * that specifies an instruction that performs "result = op r0", where 3974 * "result" is a 64-bit quantity in r0/r1. 3975 * 3976 * For: int-to-long, int-to-double, float-to-long, float-to-double 3977 */ 3978 /* unop vA, vB */ 3979 mov r9, rINST, lsr #8 @ r9<- A+ 3980 mov r3, rINST, lsr #12 @ r3<- B 3981 and r9, r9, #15 3982 GET_VREG(r0, r3) @ r0<- vB 3983 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3984 @ optional op; may set condition codes 3985 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3986 bl __aeabi_f2d @ r0<- op, r0-r3 changed 3987 GET_INST_OPCODE(ip) @ extract opcode from rINST 3988 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3989 GOTO_OPCODE(ip) @ jump to next instruction 3990 /* 10-11 instructions */ 3991 3992 3993/* ------------------------------ */ 3994 .balign 64 3995.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3996/* File: armv5te/OP_DOUBLE_TO_INT.S */ 3997/* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3998/* File: armv5te/unopNarrower.S */ 3999 /* 4000 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 4001 * that specifies an instruction that performs "result = op r0/r1", where 4002 * "result" is a 32-bit quantity in r0. 4003 * 4004 * For: long-to-float, double-to-int, double-to-float 4005 * 4006 * (This would work for long-to-int, but that instruction is actually 4007 * an exact match for OP_MOVE.) 4008 */ 4009 /* unop vA, vB */ 4010 mov r3, rINST, lsr #12 @ r3<- B 4011 mov r9, rINST, lsr #8 @ r9<- A+ 4012 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4013 and r9, r9, #15 4014 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 4015 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4016 @ optional op; may set condition codes 4017 bl __aeabi_d2iz @ r0<- op, r0-r3 changed 4018 GET_INST_OPCODE(ip) @ extract opcode from rINST 4019 SET_VREG(r0, r9) @ vA<- r0 4020 GOTO_OPCODE(ip) @ jump to next instruction 4021 /* 10-11 instructions */ 4022 4023 4024#if 0 4025@include "armv5te/unopNarrower.S" {"instr":"bl d2i_doconv"} 4026@break 4027/* 4028 * Convert the double in r0/r1 to an int in r0. 4029 * 4030 * We have to clip values to int min/max per the specification. The 4031 * expected common case is a "reasonable" value that converts directly 4032 * to modest integer. The EABI convert function isn't doing this for us. 4033 */ 4034d2i_doconv: 4035 stmfd sp!, {r4, r5, lr} @ save regs 4036 mov r2, #0x80000000 @ maxint, as a double (low word) 4037 mov r2, r2, asr #9 @ 0xffc00000 4038 sub sp, sp, #4 @ align for EABI 4039 mvn r3, #0xbe000000 @ maxint, as a double (high word) 4040 sub r3, r3, #0x00200000 @ 0x41dfffff 4041 mov r4, r0 @ save a copy of r0 4042 mov r5, r1 @ and r1 4043 bl __aeabi_dcmpge @ is arg >= maxint? 
4044 cmp r0, #0 @ nonzero == yes 4045 mvnne r0, #0x80000000 @ return maxint (0x7fffffff) 4046 bne 1f 4047 4048 mov r0, r4 @ recover arg 4049 mov r1, r5 4050 mov r3, #0xc1000000 @ minint, as a double (high word) 4051 add r3, r3, #0x00e00000 @ 0xc1e00000 4052 mov r2, #0 @ minint, as a double (low word) 4053 bl __aeabi_dcmple @ is arg <= minint? 4054 cmp r0, #0 @ nonzero == yes 4055 movne r0, #0x80000000 @ return minint (80000000) 4056 bne 1f 4057 4058 mov r0, r4 @ recover arg 4059 mov r1, r5 4060 mov r2, r4 @ compare against self 4061 mov r3, r5 4062 bl __aeabi_dcmpeq @ is arg == self? 4063 cmp r0, #0 @ zero == no 4064 beq 1f @ return zero for NaN 4065 4066 mov r0, r4 @ recover arg 4067 mov r1, r5 4068 bl __aeabi_d2iz @ convert double to int 4069 40701: 4071 add sp, sp, #4 4072 ldmfd sp!, {r4, r5, pc} 4073#endif 4074 4075 4076/* ------------------------------ */ 4077 .balign 64 4078.L_OP_DOUBLE_TO_LONG: /* 0x8b */ 4079/* File: armv5te/OP_DOUBLE_TO_LONG.S */ 4080@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"} 4081/* File: armv5te/unopWide.S */ 4082 /* 4083 * Generic 64-bit unary operation. Provide an "instr" line that 4084 * specifies an instruction that performs "result = op r0/r1". 4085 * This could be an ARM instruction or a function call. 4086 * 4087 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 4088 */ 4089 /* unop vA, vB */ 4090 mov r9, rINST, lsr #8 @ r9<- A+ 4091 mov r3, rINST, lsr #12 @ r3<- B 4092 and r9, r9, #15 4093 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4094 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 4095 ldmia r3, {r0-r1} @ r0/r1<- vAA 4096 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4097 @ optional op; may set condition codes 4098 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 4099 GET_INST_OPCODE(ip) @ extract opcode from rINST 4100 stmia r9, {r0-r1} @ vAA<- r0/r1 4101 GOTO_OPCODE(ip) @ jump to next instruction 4102 /* 12-13 instructions */ 4103 4104 4105 4106 4107/* ------------------------------ */ 4108 .balign 64 4109.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 4110/* File: armv5te/OP_DOUBLE_TO_FLOAT.S */ 4111/* File: armv5te/unopNarrower.S */ 4112 /* 4113 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 4114 * that specifies an instruction that performs "result = op r0/r1", where 4115 * "result" is a 32-bit quantity in r0. 4116 * 4117 * For: long-to-float, double-to-int, double-to-float 4118 * 4119 * (This would work for long-to-int, but that instruction is actually 4120 * an exact match for OP_MOVE.) 4121 */ 4122 /* unop vA, vB */ 4123 mov r3, rINST, lsr #12 @ r3<- B 4124 mov r9, rINST, lsr #8 @ r9<- A+ 4125 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4126 and r9, r9, #15 4127 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 4128 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4129 @ optional op; may set condition codes 4130 bl __aeabi_d2f @ r0<- op, r0-r3 changed 4131 GET_INST_OPCODE(ip) @ extract opcode from rINST 4132 SET_VREG(r0, r9) @ vA<- r0 4133 GOTO_OPCODE(ip) @ jump to next instruction 4134 /* 10-11 instructions */ 4135 4136 4137/* ------------------------------ */ 4138 .balign 64 4139.L_OP_INT_TO_BYTE: /* 0x8d */ 4140/* File: armv5te/OP_INT_TO_BYTE.S */ 4141/* File: armv5te/unop.S */ 4142 /* 4143 * Generic 32-bit unary operation. Provide an "instr" line that 4144 * specifies an instruction that performs "result = op r0". 4145 * This could be an ARM instruction or a function call. 
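 *
 * The narrowing int conversions are simple shift pairs: int-to-byte below
 * uses asl #24 / asr #24 (sign-extend the low byte, "(int)(signed char)v"),
 * int-to-char uses asl #16 / lsr #16 (zero-extend, "v & 0xffff"), and
 * int-to-short uses asl #16 / asr #16 ("(int)(short)v").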
4146 * 4147 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4148 * int-to-byte, int-to-char, int-to-short 4149 */ 4150 /* unop vA, vB */ 4151 mov r3, rINST, lsr #12 @ r3<- B 4152 mov r9, rINST, lsr #8 @ r9<- A+ 4153 GET_VREG(r0, r3) @ r0<- vB 4154 and r9, r9, #15 4155 mov r0, r0, asl #24 @ optional op; may set condition codes 4156 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4157 mov r0, r0, asr #24 @ r0<- op, r0-r3 changed 4158 GET_INST_OPCODE(ip) @ extract opcode from rINST 4159 SET_VREG(r0, r9) @ vAA<- r0 4160 GOTO_OPCODE(ip) @ jump to next instruction 4161 /* 9-10 instructions */ 4162 4163 4164/* ------------------------------ */ 4165 .balign 64 4166.L_OP_INT_TO_CHAR: /* 0x8e */ 4167/* File: armv5te/OP_INT_TO_CHAR.S */ 4168/* File: armv5te/unop.S */ 4169 /* 4170 * Generic 32-bit unary operation. Provide an "instr" line that 4171 * specifies an instruction that performs "result = op r0". 4172 * This could be an ARM instruction or a function call. 4173 * 4174 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4175 * int-to-byte, int-to-char, int-to-short 4176 */ 4177 /* unop vA, vB */ 4178 mov r3, rINST, lsr #12 @ r3<- B 4179 mov r9, rINST, lsr #8 @ r9<- A+ 4180 GET_VREG(r0, r3) @ r0<- vB 4181 and r9, r9, #15 4182 mov r0, r0, asl #16 @ optional op; may set condition codes 4183 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4184 mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed 4185 GET_INST_OPCODE(ip) @ extract opcode from rINST 4186 SET_VREG(r0, r9) @ vAA<- r0 4187 GOTO_OPCODE(ip) @ jump to next instruction 4188 /* 9-10 instructions */ 4189 4190 4191/* ------------------------------ */ 4192 .balign 64 4193.L_OP_INT_TO_SHORT: /* 0x8f */ 4194/* File: armv5te/OP_INT_TO_SHORT.S */ 4195/* File: armv5te/unop.S */ 4196 /* 4197 * Generic 32-bit unary operation. Provide an "instr" line that 4198 * specifies an instruction that performs "result = op r0". 4199 * This could be an ARM instruction or a function call. 4200 * 4201 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4202 * int-to-byte, int-to-char, int-to-short 4203 */ 4204 /* unop vA, vB */ 4205 mov r3, rINST, lsr #12 @ r3<- B 4206 mov r9, rINST, lsr #8 @ r9<- A+ 4207 GET_VREG(r0, r3) @ r0<- vB 4208 and r9, r9, #15 4209 mov r0, r0, asl #16 @ optional op; may set condition codes 4210 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4211 mov r0, r0, asr #16 @ r0<- op, r0-r3 changed 4212 GET_INST_OPCODE(ip) @ extract opcode from rINST 4213 SET_VREG(r0, r9) @ vAA<- r0 4214 GOTO_OPCODE(ip) @ jump to next instruction 4215 /* 9-10 instructions */ 4216 4217 4218/* ------------------------------ */ 4219 .balign 64 4220.L_OP_ADD_INT: /* 0x90 */ 4221/* File: armv5te/OP_ADD_INT.S */ 4222/* File: armv5te/binop.S */ 4223 /* 4224 * Generic 32-bit binary operation. Provide an "instr" line that 4225 * specifies an instruction that performs "result = r0 op r1". 4226 * This could be an ARM instruction or a function call. (If the result 4227 * comes back in a register other than r0, you can override "result".) 4228 * 4229 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4230 * vCC (r1). Useful for integer division and modulus. Note that we 4231 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4232 * handles it correctly. 
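 *
 * Operand decode for this format, in rough C terms:
 *
 *     u2 ccbb = pc[1];                   // FETCH(r0, 1)
 *     int BB = ccbb & 0xff, CC = ccbb >> 8, AA = inst >> 8;
 *     fp[AA] = fp[BB] op fp[CC];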
4233 * 4234 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4235 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4236 * mul-float, div-float, rem-float 4237 */ 4238 /* binop vAA, vBB, vCC */ 4239 FETCH(r0, 1) @ r0<- CCBB 4240 mov r9, rINST, lsr #8 @ r9<- AA 4241 mov r3, r0, lsr #8 @ r3<- CC 4242 and r2, r0, #255 @ r2<- BB 4243 GET_VREG(r1, r3) @ r1<- vCC 4244 GET_VREG(r0, r2) @ r0<- vBB 4245 .if 0 4246 cmp r1, #0 @ is second operand zero? 4247 beq common_errDivideByZero 4248 .endif 4249 4250 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4251 @ optional op; may set condition codes 4252 add r0, r0, r1 @ r0<- op, r0-r3 changed 4253 GET_INST_OPCODE(ip) @ extract opcode from rINST 4254 SET_VREG(r0, r9) @ vAA<- r0 4255 GOTO_OPCODE(ip) @ jump to next instruction 4256 /* 11-14 instructions */ 4257 4258 4259 4260/* ------------------------------ */ 4261 .balign 64 4262.L_OP_SUB_INT: /* 0x91 */ 4263/* File: armv5te/OP_SUB_INT.S */ 4264/* File: armv5te/binop.S */ 4265 /* 4266 * Generic 32-bit binary operation. Provide an "instr" line that 4267 * specifies an instruction that performs "result = r0 op r1". 4268 * This could be an ARM instruction or a function call. (If the result 4269 * comes back in a register other than r0, you can override "result".) 4270 * 4271 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4272 * vCC (r1). Useful for integer division and modulus. Note that we 4273 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4274 * handles it correctly. 4275 * 4276 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4277 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4278 * mul-float, div-float, rem-float 4279 */ 4280 /* binop vAA, vBB, vCC */ 4281 FETCH(r0, 1) @ r0<- CCBB 4282 mov r9, rINST, lsr #8 @ r9<- AA 4283 mov r3, r0, lsr #8 @ r3<- CC 4284 and r2, r0, #255 @ r2<- BB 4285 GET_VREG(r1, r3) @ r1<- vCC 4286 GET_VREG(r0, r2) @ r0<- vBB 4287 .if 0 4288 cmp r1, #0 @ is second operand zero? 4289 beq common_errDivideByZero 4290 .endif 4291 4292 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4293 @ optional op; may set condition codes 4294 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4295 GET_INST_OPCODE(ip) @ extract opcode from rINST 4296 SET_VREG(r0, r9) @ vAA<- r0 4297 GOTO_OPCODE(ip) @ jump to next instruction 4298 /* 11-14 instructions */ 4299 4300 4301 4302/* ------------------------------ */ 4303 .balign 64 4304.L_OP_MUL_INT: /* 0x92 */ 4305/* File: armv5te/OP_MUL_INT.S */ 4306/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4307/* File: armv5te/binop.S */ 4308 /* 4309 * Generic 32-bit binary operation. Provide an "instr" line that 4310 * specifies an instruction that performs "result = r0 op r1". 4311 * This could be an ARM instruction or a function call. (If the result 4312 * comes back in a register other than r0, you can override "result".) 4313 * 4314 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4315 * vCC (r1). Useful for integer division and modulus. Note that we 4316 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4317 * handles it correctly. 
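 *
 * (The "mul r0, r1, r0" operand order noted above matters only because
 * pre-ARMv6 MUL leaves the result UNPREDICTABLE when Rd == Rm; the
 * product itself is the same either way.)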
4318 * 4319 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4320 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4321 * mul-float, div-float, rem-float 4322 */ 4323 /* binop vAA, vBB, vCC */ 4324 FETCH(r0, 1) @ r0<- CCBB 4325 mov r9, rINST, lsr #8 @ r9<- AA 4326 mov r3, r0, lsr #8 @ r3<- CC 4327 and r2, r0, #255 @ r2<- BB 4328 GET_VREG(r1, r3) @ r1<- vCC 4329 GET_VREG(r0, r2) @ r0<- vBB 4330 .if 0 4331 cmp r1, #0 @ is second operand zero? 4332 beq common_errDivideByZero 4333 .endif 4334 4335 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4336 @ optional op; may set condition codes 4337 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4338 GET_INST_OPCODE(ip) @ extract opcode from rINST 4339 SET_VREG(r0, r9) @ vAA<- r0 4340 GOTO_OPCODE(ip) @ jump to next instruction 4341 /* 11-14 instructions */ 4342 4343 4344 4345/* ------------------------------ */ 4346 .balign 64 4347.L_OP_DIV_INT: /* 0x93 */ 4348/* File: armv5te/OP_DIV_INT.S */ 4349/* File: armv5te/binop.S */ 4350 /* 4351 * Generic 32-bit binary operation. Provide an "instr" line that 4352 * specifies an instruction that performs "result = r0 op r1". 4353 * This could be an ARM instruction or a function call. (If the result 4354 * comes back in a register other than r0, you can override "result".) 4355 * 4356 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4357 * vCC (r1). Useful for integer division and modulus. Note that we 4358 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4359 * handles it correctly. 4360 * 4361 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4362 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4363 * mul-float, div-float, rem-float 4364 */ 4365 /* binop vAA, vBB, vCC */ 4366 FETCH(r0, 1) @ r0<- CCBB 4367 mov r9, rINST, lsr #8 @ r9<- AA 4368 mov r3, r0, lsr #8 @ r3<- CC 4369 and r2, r0, #255 @ r2<- BB 4370 GET_VREG(r1, r3) @ r1<- vCC 4371 GET_VREG(r0, r2) @ r0<- vBB 4372 .if 1 4373 cmp r1, #0 @ is second operand zero? 4374 beq common_errDivideByZero 4375 .endif 4376 4377 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4378 @ optional op; may set condition codes 4379 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4380 GET_INST_OPCODE(ip) @ extract opcode from rINST 4381 SET_VREG(r0, r9) @ vAA<- r0 4382 GOTO_OPCODE(ip) @ jump to next instruction 4383 /* 11-14 instructions */ 4384 4385 4386 4387/* ------------------------------ */ 4388 .balign 64 4389.L_OP_REM_INT: /* 0x94 */ 4390/* File: armv5te/OP_REM_INT.S */ 4391/* idivmod returns quotient in r0 and remainder in r1 */ 4392/* File: armv5te/binop.S */ 4393 /* 4394 * Generic 32-bit binary operation. Provide an "instr" line that 4395 * specifies an instruction that performs "result = r0 op r1". 4396 * This could be an ARM instruction or a function call. (If the result 4397 * comes back in a register other than r0, you can override "result".) 4398 * 4399 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4400 * vCC (r1). Useful for integer division and modulus. Note that we 4401 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4402 * handles it correctly. 
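 *
 * chkzero is 1 for div-int and rem-int, so the cmp/beq pair below is
 * assembled in and branches to common_errDivideByZero (which throws the
 * divide-by-zero exception) when vCC is zero.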
4403 * 4404 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4405 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4406 * mul-float, div-float, rem-float 4407 */ 4408 /* binop vAA, vBB, vCC */ 4409 FETCH(r0, 1) @ r0<- CCBB 4410 mov r9, rINST, lsr #8 @ r9<- AA 4411 mov r3, r0, lsr #8 @ r3<- CC 4412 and r2, r0, #255 @ r2<- BB 4413 GET_VREG(r1, r3) @ r1<- vCC 4414 GET_VREG(r0, r2) @ r0<- vBB 4415 .if 1 4416 cmp r1, #0 @ is second operand zero? 4417 beq common_errDivideByZero 4418 .endif 4419 4420 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4421 @ optional op; may set condition codes 4422 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4423 GET_INST_OPCODE(ip) @ extract opcode from rINST 4424 SET_VREG(r1, r9) @ vAA<- r1 4425 GOTO_OPCODE(ip) @ jump to next instruction 4426 /* 11-14 instructions */ 4427 4428 4429 4430/* ------------------------------ */ 4431 .balign 64 4432.L_OP_AND_INT: /* 0x95 */ 4433/* File: armv5te/OP_AND_INT.S */ 4434/* File: armv5te/binop.S */ 4435 /* 4436 * Generic 32-bit binary operation. Provide an "instr" line that 4437 * specifies an instruction that performs "result = r0 op r1". 4438 * This could be an ARM instruction or a function call. (If the result 4439 * comes back in a register other than r0, you can override "result".) 4440 * 4441 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4442 * vCC (r1). Useful for integer division and modulus. Note that we 4443 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4444 * handles it correctly. 4445 * 4446 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4447 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4448 * mul-float, div-float, rem-float 4449 */ 4450 /* binop vAA, vBB, vCC */ 4451 FETCH(r0, 1) @ r0<- CCBB 4452 mov r9, rINST, lsr #8 @ r9<- AA 4453 mov r3, r0, lsr #8 @ r3<- CC 4454 and r2, r0, #255 @ r2<- BB 4455 GET_VREG(r1, r3) @ r1<- vCC 4456 GET_VREG(r0, r2) @ r0<- vBB 4457 .if 0 4458 cmp r1, #0 @ is second operand zero? 4459 beq common_errDivideByZero 4460 .endif 4461 4462 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4463 @ optional op; may set condition codes 4464 and r0, r0, r1 @ r0<- op, r0-r3 changed 4465 GET_INST_OPCODE(ip) @ extract opcode from rINST 4466 SET_VREG(r0, r9) @ vAA<- r0 4467 GOTO_OPCODE(ip) @ jump to next instruction 4468 /* 11-14 instructions */ 4469 4470 4471 4472/* ------------------------------ */ 4473 .balign 64 4474.L_OP_OR_INT: /* 0x96 */ 4475/* File: armv5te/OP_OR_INT.S */ 4476/* File: armv5te/binop.S */ 4477 /* 4478 * Generic 32-bit binary operation. Provide an "instr" line that 4479 * specifies an instruction that performs "result = r0 op r1". 4480 * This could be an ARM instruction or a function call. (If the result 4481 * comes back in a register other than r0, you can override "result".) 4482 * 4483 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4484 * vCC (r1). Useful for integer division and modulus. Note that we 4485 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4486 * handles it correctly. 
4487 * 4488 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4489 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4490 * mul-float, div-float, rem-float 4491 */ 4492 /* binop vAA, vBB, vCC */ 4493 FETCH(r0, 1) @ r0<- CCBB 4494 mov r9, rINST, lsr #8 @ r9<- AA 4495 mov r3, r0, lsr #8 @ r3<- CC 4496 and r2, r0, #255 @ r2<- BB 4497 GET_VREG(r1, r3) @ r1<- vCC 4498 GET_VREG(r0, r2) @ r0<- vBB 4499 .if 0 4500 cmp r1, #0 @ is second operand zero? 4501 beq common_errDivideByZero 4502 .endif 4503 4504 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4505 @ optional op; may set condition codes 4506 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4507 GET_INST_OPCODE(ip) @ extract opcode from rINST 4508 SET_VREG(r0, r9) @ vAA<- r0 4509 GOTO_OPCODE(ip) @ jump to next instruction 4510 /* 11-14 instructions */ 4511 4512 4513 4514/* ------------------------------ */ 4515 .balign 64 4516.L_OP_XOR_INT: /* 0x97 */ 4517/* File: armv5te/OP_XOR_INT.S */ 4518/* File: armv5te/binop.S */ 4519 /* 4520 * Generic 32-bit binary operation. Provide an "instr" line that 4521 * specifies an instruction that performs "result = r0 op r1". 4522 * This could be an ARM instruction or a function call. (If the result 4523 * comes back in a register other than r0, you can override "result".) 4524 * 4525 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4526 * vCC (r1). Useful for integer division and modulus. Note that we 4527 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4528 * handles it correctly. 4529 * 4530 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4531 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4532 * mul-float, div-float, rem-float 4533 */ 4534 /* binop vAA, vBB, vCC */ 4535 FETCH(r0, 1) @ r0<- CCBB 4536 mov r9, rINST, lsr #8 @ r9<- AA 4537 mov r3, r0, lsr #8 @ r3<- CC 4538 and r2, r0, #255 @ r2<- BB 4539 GET_VREG(r1, r3) @ r1<- vCC 4540 GET_VREG(r0, r2) @ r0<- vBB 4541 .if 0 4542 cmp r1, #0 @ is second operand zero? 4543 beq common_errDivideByZero 4544 .endif 4545 4546 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4547 @ optional op; may set condition codes 4548 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4549 GET_INST_OPCODE(ip) @ extract opcode from rINST 4550 SET_VREG(r0, r9) @ vAA<- r0 4551 GOTO_OPCODE(ip) @ jump to next instruction 4552 /* 11-14 instructions */ 4553 4554 4555 4556/* ------------------------------ */ 4557 .balign 64 4558.L_OP_SHL_INT: /* 0x98 */ 4559/* File: armv5te/OP_SHL_INT.S */ 4560/* File: armv5te/binop.S */ 4561 /* 4562 * Generic 32-bit binary operation. Provide an "instr" line that 4563 * specifies an instruction that performs "result = r0 op r1". 4564 * This could be an ARM instruction or a function call. (If the result 4565 * comes back in a register other than r0, you can override "result".) 4566 * 4567 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4568 * vCC (r1). Useful for integer division and modulus. Note that we 4569 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4570 * handles it correctly. 
4571 * 4572 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4573 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4574 * mul-float, div-float, rem-float 4575 */ 4576 /* binop vAA, vBB, vCC */ 4577 FETCH(r0, 1) @ r0<- CCBB 4578 mov r9, rINST, lsr #8 @ r9<- AA 4579 mov r3, r0, lsr #8 @ r3<- CC 4580 and r2, r0, #255 @ r2<- BB 4581 GET_VREG(r1, r3) @ r1<- vCC 4582 GET_VREG(r0, r2) @ r0<- vBB 4583 .if 0 4584 cmp r1, #0 @ is second operand zero? 4585 beq common_errDivideByZero 4586 .endif 4587 4588 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4589 and r1, r1, #31 @ optional op; may set condition codes 4590 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 4591 GET_INST_OPCODE(ip) @ extract opcode from rINST 4592 SET_VREG(r0, r9) @ vAA<- r0 4593 GOTO_OPCODE(ip) @ jump to next instruction 4594 /* 11-14 instructions */ 4595 4596 4597 4598/* ------------------------------ */ 4599 .balign 64 4600.L_OP_SHR_INT: /* 0x99 */ 4601/* File: armv5te/OP_SHR_INT.S */ 4602/* File: armv5te/binop.S */ 4603 /* 4604 * Generic 32-bit binary operation. Provide an "instr" line that 4605 * specifies an instruction that performs "result = r0 op r1". 4606 * This could be an ARM instruction or a function call. (If the result 4607 * comes back in a register other than r0, you can override "result".) 4608 * 4609 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4610 * vCC (r1). Useful for integer division and modulus. Note that we 4611 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4612 * handles it correctly. 4613 * 4614 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4615 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4616 * mul-float, div-float, rem-float 4617 */ 4618 /* binop vAA, vBB, vCC */ 4619 FETCH(r0, 1) @ r0<- CCBB 4620 mov r9, rINST, lsr #8 @ r9<- AA 4621 mov r3, r0, lsr #8 @ r3<- CC 4622 and r2, r0, #255 @ r2<- BB 4623 GET_VREG(r1, r3) @ r1<- vCC 4624 GET_VREG(r0, r2) @ r0<- vBB 4625 .if 0 4626 cmp r1, #0 @ is second operand zero? 4627 beq common_errDivideByZero 4628 .endif 4629 4630 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4631 and r1, r1, #31 @ optional op; may set condition codes 4632 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 4633 GET_INST_OPCODE(ip) @ extract opcode from rINST 4634 SET_VREG(r0, r9) @ vAA<- r0 4635 GOTO_OPCODE(ip) @ jump to next instruction 4636 /* 11-14 instructions */ 4637 4638 4639 4640/* ------------------------------ */ 4641 .balign 64 4642.L_OP_USHR_INT: /* 0x9a */ 4643/* File: armv5te/OP_USHR_INT.S */ 4644/* File: armv5te/binop.S */ 4645 /* 4646 * Generic 32-bit binary operation. Provide an "instr" line that 4647 * specifies an instruction that performs "result = r0 op r1". 4648 * This could be an ARM instruction or a function call. (If the result 4649 * comes back in a register other than r0, you can override "result".) 4650 * 4651 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4652 * vCC (r1). Useful for integer division and modulus. Note that we 4653 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4654 * handles it correctly. 
4655 * 4656 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4657 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4658 * mul-float, div-float, rem-float 4659 */ 4660 /* binop vAA, vBB, vCC */ 4661 FETCH(r0, 1) @ r0<- CCBB 4662 mov r9, rINST, lsr #8 @ r9<- AA 4663 mov r3, r0, lsr #8 @ r3<- CC 4664 and r2, r0, #255 @ r2<- BB 4665 GET_VREG(r1, r3) @ r1<- vCC 4666 GET_VREG(r0, r2) @ r0<- vBB 4667 .if 0 4668 cmp r1, #0 @ is second operand zero? 4669 beq common_errDivideByZero 4670 .endif 4671 4672 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4673 and r1, r1, #31 @ optional op; may set condition codes 4674 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 4675 GET_INST_OPCODE(ip) @ extract opcode from rINST 4676 SET_VREG(r0, r9) @ vAA<- r0 4677 GOTO_OPCODE(ip) @ jump to next instruction 4678 /* 11-14 instructions */ 4679 4680 4681 4682/* ------------------------------ */ 4683 .balign 64 4684.L_OP_ADD_LONG: /* 0x9b */ 4685/* File: armv5te/OP_ADD_LONG.S */ 4686/* File: armv5te/binopWide.S */ 4687 /* 4688 * Generic 64-bit binary operation. Provide an "instr" line that 4689 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4690 * This could be an ARM instruction or a function call. (If the result 4691 * comes back in a register other than r0, you can override "result".) 4692 * 4693 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4694 * vCC (r1). Useful for integer division and modulus. 4695 * 4696 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4697 * xor-long, add-double, sub-double, mul-double, div-double, 4698 * rem-double 4699 * 4700 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4701 */ 4702 /* binop vAA, vBB, vCC */ 4703 FETCH(r0, 1) @ r0<- CCBB 4704 mov r9, rINST, lsr #8 @ r9<- AA 4705 and r2, r0, #255 @ r2<- BB 4706 mov r3, r0, lsr #8 @ r3<- CC 4707 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4708 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4709 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4710 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4711 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4712 .if 0 4713 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4714 beq common_errDivideByZero 4715 .endif 4716 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4717 4718 adds r0, r0, r2 @ optional op; may set condition codes 4719 adc r1, r1, r3 @ result<- op, r0-r3 changed 4720 GET_INST_OPCODE(ip) @ extract opcode from rINST 4721 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4722 GOTO_OPCODE(ip) @ jump to next instruction 4723 /* 14-17 instructions */ 4724 4725 4726 4727/* ------------------------------ */ 4728 .balign 64 4729.L_OP_SUB_LONG: /* 0x9c */ 4730/* File: armv5te/OP_SUB_LONG.S */ 4731/* File: armv5te/binopWide.S */ 4732 /* 4733 * Generic 64-bit binary operation. Provide an "instr" line that 4734 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4735 * This could be an ARM instruction or a function call. (If the result 4736 * comes back in a register other than r0, you can override "result".) 4737 * 4738 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4739 * vCC (r1). Useful for integer division and modulus. 4740 * 4741 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4742 * xor-long, add-double, sub-double, mul-double, div-double, 4743 * rem-double 4744 * 4745 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
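 *
 * add-long above pairs adds/adc and sub-long below pairs subs/sbc, so the
 * carry (or borrow) out of the low word feeds the high word -- the whole
 * operation is just a 64-bit "+" or "-" in C.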
4746 */ 4747 /* binop vAA, vBB, vCC */ 4748 FETCH(r0, 1) @ r0<- CCBB 4749 mov r9, rINST, lsr #8 @ r9<- AA 4750 and r2, r0, #255 @ r2<- BB 4751 mov r3, r0, lsr #8 @ r3<- CC 4752 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4753 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4754 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4755 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4756 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4757 .if 0 4758 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4759 beq common_errDivideByZero 4760 .endif 4761 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4762 4763 subs r0, r0, r2 @ optional op; may set condition codes 4764 sbc r1, r1, r3 @ result<- op, r0-r3 changed 4765 GET_INST_OPCODE(ip) @ extract opcode from rINST 4766 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4767 GOTO_OPCODE(ip) @ jump to next instruction 4768 /* 14-17 instructions */ 4769 4770 4771 4772/* ------------------------------ */ 4773 .balign 64 4774.L_OP_MUL_LONG: /* 0x9d */ 4775/* File: armv5te/OP_MUL_LONG.S */ 4776 /* 4777 * Signed 64-bit integer multiply. 4778 * 4779 * Consider WXxYZ (r1r0 x r3r2) with a long multiply: 4780 * WX 4781 * x YZ 4782 * -------- 4783 * ZW ZX 4784 * YW YX 4785 * 4786 * The low word of the result holds ZX, the high word holds 4787 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because 4788 * it doesn't fit in the low 64 bits. 4789 * 4790 * Unlike most ARM math operations, multiply instructions have 4791 * restrictions on using the same register more than once (Rd and Rm 4792 * cannot be the same). 4793 */ 4794 /* mul-long vAA, vBB, vCC */ 4795 FETCH(r0, 1) @ r0<- CCBB 4796 and r2, r0, #255 @ r2<- BB 4797 mov r3, r0, lsr #8 @ r3<- CC 4798 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4799 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4800 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4801 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4802 mul ip, r2, r1 @ ip<- ZxW 4803 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 4804 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 4805 mov r0, rINST, lsr #8 @ r0<- AA 4806 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 4807 add r0, rFP, r0, lsl #2 @ r0<- &fp[AA] 4808 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4809 b .LOP_MUL_LONG_finish 4810 4811/* ------------------------------ */ 4812 .balign 64 4813.L_OP_DIV_LONG: /* 0x9e */ 4814/* File: armv5te/OP_DIV_LONG.S */ 4815/* File: armv5te/binopWide.S */ 4816 /* 4817 * Generic 64-bit binary operation. Provide an "instr" line that 4818 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4819 * This could be an ARM instruction or a function call. (If the result 4820 * comes back in a register other than r0, you can override "result".) 4821 * 4822 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4823 * vCC (r1). Useful for integer division and modulus. 4824 * 4825 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4826 * xor-long, add-double, sub-double, mul-double, div-double, 4827 * rem-double 4828 * 4829 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4830 */ 4831 /* binop vAA, vBB, vCC */ 4832 FETCH(r0, 1) @ r0<- CCBB 4833 mov r9, rINST, lsr #8 @ r9<- AA 4834 and r2, r0, #255 @ r2<- BB 4835 mov r3, r0, lsr #8 @ r3<- CC 4836 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4837 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4838 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4839 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4840 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4841 .if 1 4842 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
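    /*
     * The orrs/beq pair here is the 64-bit divide-by-zero check: a long is
     * zero only when both 32-bit halves are zero, so OR-ing the halves and
     * testing the flags covers the whole value in one instruction. A rough
     * C sketch of the idea (illustration only, not what is assembled here):
     *
     *     #include <stdint.h>
     *
     *     static int isZeroLong(uint32_t lo, uint32_t hi) {
     *         return (lo | hi) == 0;          // same test as "orrs ip, r2, r3"
     *     }
     *
     * When the divisor is nonzero, div-long falls through to
     * __aeabi_ldivmod, which returns the quotient in r0/r1 (and the
     * remainder in r2/r3); the quotient pair is what gets stored to vAA.
     */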
4843 beq common_errDivideByZero 4844 .endif 4845 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4846 4847 @ optional op; may set condition codes 4848 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4849 GET_INST_OPCODE(ip) @ extract opcode from rINST 4850 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4851 GOTO_OPCODE(ip) @ jump to next instruction 4852 /* 14-17 instructions */ 4853 4854 4855 4856/* ------------------------------ */ 4857 .balign 64 4858.L_OP_REM_LONG: /* 0x9f */ 4859/* File: armv5te/OP_REM_LONG.S */ 4860/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 4861/* File: armv5te/binopWide.S */ 4862 /* 4863 * Generic 64-bit binary operation. Provide an "instr" line that 4864 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4865 * This could be an ARM instruction or a function call. (If the result 4866 * comes back in a register other than r0, you can override "result".) 4867 * 4868 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4869 * vCC (r1). Useful for integer division and modulus. 4870 * 4871 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4872 * xor-long, add-double, sub-double, mul-double, div-double, 4873 * rem-double 4874 * 4875 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4876 */ 4877 /* binop vAA, vBB, vCC */ 4878 FETCH(r0, 1) @ r0<- CCBB 4879 mov r9, rINST, lsr #8 @ r9<- AA 4880 and r2, r0, #255 @ r2<- BB 4881 mov r3, r0, lsr #8 @ r3<- CC 4882 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4883 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4884 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4885 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4886 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4887 .if 1 4888 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4889 beq common_errDivideByZero 4890 .endif 4891 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4892 4893 @ optional op; may set condition codes 4894 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4895 GET_INST_OPCODE(ip) @ extract opcode from rINST 4896 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 4897 GOTO_OPCODE(ip) @ jump to next instruction 4898 /* 14-17 instructions */ 4899 4900 4901 4902/* ------------------------------ */ 4903 .balign 64 4904.L_OP_AND_LONG: /* 0xa0 */ 4905/* File: armv5te/OP_AND_LONG.S */ 4906/* File: armv5te/binopWide.S */ 4907 /* 4908 * Generic 64-bit binary operation. Provide an "instr" line that 4909 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4910 * This could be an ARM instruction or a function call. (If the result 4911 * comes back in a register other than r0, you can override "result".) 4912 * 4913 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4914 * vCC (r1). Useful for integer division and modulus. 4915 * 4916 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4917 * xor-long, add-double, sub-double, mul-double, div-double, 4918 * rem-double 4919 * 4920 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4921 */ 4922 /* binop vAA, vBB, vCC */ 4923 FETCH(r0, 1) @ r0<- CCBB 4924 mov r9, rINST, lsr #8 @ r9<- AA 4925 and r2, r0, #255 @ r2<- BB 4926 mov r3, r0, lsr #8 @ r3<- CC 4927 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4928 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4929 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4930 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4931 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4932 .if 0 4933 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
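    /*
     * The div-long and rem-long handlers above make the identical
     * __aeabi_ldivmod call; they differ only in which half of the result
     * is written back: div-long stores the quotient from r0/r1, rem-long
     * the remainder from r2/r3. In C terms the two opcodes compute roughly
     * (sketch only; both assume the divisor was already checked for zero):
     *
     *     #include <stdint.h>
     *
     *     int64_t divLong(int64_t vBB, int64_t vCC) { return vBB / vCC; }
     *     int64_t remLong(int64_t vBB, int64_t vCC) { return vBB % vCC; }
     */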
4934 beq common_errDivideByZero 4935 .endif 4936 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4937 4938 and r0, r0, r2 @ optional op; may set condition codes 4939 and r1, r1, r3 @ result<- op, r0-r3 changed 4940 GET_INST_OPCODE(ip) @ extract opcode from rINST 4941 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4942 GOTO_OPCODE(ip) @ jump to next instruction 4943 /* 14-17 instructions */ 4944 4945 4946 4947/* ------------------------------ */ 4948 .balign 64 4949.L_OP_OR_LONG: /* 0xa1 */ 4950/* File: armv5te/OP_OR_LONG.S */ 4951/* File: armv5te/binopWide.S */ 4952 /* 4953 * Generic 64-bit binary operation. Provide an "instr" line that 4954 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4955 * This could be an ARM instruction or a function call. (If the result 4956 * comes back in a register other than r0, you can override "result".) 4957 * 4958 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4959 * vCC (r1). Useful for integer division and modulus. 4960 * 4961 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4962 * xor-long, add-double, sub-double, mul-double, div-double, 4963 * rem-double 4964 * 4965 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4966 */ 4967 /* binop vAA, vBB, vCC */ 4968 FETCH(r0, 1) @ r0<- CCBB 4969 mov r9, rINST, lsr #8 @ r9<- AA 4970 and r2, r0, #255 @ r2<- BB 4971 mov r3, r0, lsr #8 @ r3<- CC 4972 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4973 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4974 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4975 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4976 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4977 .if 0 4978 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4979 beq common_errDivideByZero 4980 .endif 4981 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4982 4983 orr r0, r0, r2 @ optional op; may set condition codes 4984 orr r1, r1, r3 @ result<- op, r0-r3 changed 4985 GET_INST_OPCODE(ip) @ extract opcode from rINST 4986 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4987 GOTO_OPCODE(ip) @ jump to next instruction 4988 /* 14-17 instructions */ 4989 4990 4991 4992/* ------------------------------ */ 4993 .balign 64 4994.L_OP_XOR_LONG: /* 0xa2 */ 4995/* File: armv5te/OP_XOR_LONG.S */ 4996/* File: armv5te/binopWide.S */ 4997 /* 4998 * Generic 64-bit binary operation. Provide an "instr" line that 4999 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5000 * This could be an ARM instruction or a function call. (If the result 5001 * comes back in a register other than r0, you can override "result".) 5002 * 5003 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5004 * vCC (r1). Useful for integer division and modulus. 5005 * 5006 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5007 * xor-long, add-double, sub-double, mul-double, div-double, 5008 * rem-double 5009 * 5010 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5011 */ 5012 /* binop vAA, vBB, vCC */ 5013 FETCH(r0, 1) @ r0<- CCBB 5014 mov r9, rINST, lsr #8 @ r9<- AA 5015 and r2, r0, #255 @ r2<- BB 5016 mov r3, r0, lsr #8 @ r3<- CC 5017 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5018 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5019 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5020 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5021 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5022 .if 0 5023 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG: /* 0xa3 */
/* File: armv5te/OP_SHL_LONG.S */
    /*
     * Long integer shift. This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit. Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG: /* 0xa4 */
/* File: armv5te/OP_SHR_LONG.S */
    /*
     * Long integer shift. This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit. Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHR_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG: /* 0xa5 */
/* File: armv5te/OP_USHR_LONG.S */
    /*
     * Long integer shift. This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit. Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* ushr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_USHR_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT: /* 0xa6 */
/* File: armv5te/OP_ADD_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation. Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call. (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1). Useful for integer division and modulus. Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    bl      __aeabi_fadd                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT: /* 0xa7 */
/* File: armv5te/OP_SUB_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation. Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call. (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1). Useful for integer division and modulus. Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
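    /*
     * The shl-long/shr-long/ushr-long handlers above synthesize a 64-bit
     * shift from 32-bit shifts: for distances below 32 the two halves are
     * merged with an "orr", and the conditional "movpl" patches the result
     * when the masked distance is 32 or more. Roughly, in C, assuming the
     * shift count has already been masked to 0..63 (sketch only):
     *
     *     #include <stdint.h>
     *
     *     static uint64_t shlLong(uint32_t lo, uint32_t hi, unsigned n) {
     *         uint32_t rlo, rhi;
     *         if (n >= 32) {            // "movpl" case: only the low word feeds the result
     *             rhi = lo << (n - 32);
     *             rlo = 0;
     *         } else if (n == 0) {      // C can't shift by 32; ARM register shifts can
     *             rhi = hi;
     *             rlo = lo;
     *         } else {
     *             rhi = (hi << n) | (lo >> (32 - n));
     *             rlo = lo << n;
     *         }
     *         return ((uint64_t)rhi << 32) | rlo;
     *     }
     */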
5195 beq common_errDivideByZero 5196 .endif 5197 5198 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5199 @ optional op; may set condition codes 5200 bl __aeabi_fsub @ r0<- op, r0-r3 changed 5201 GET_INST_OPCODE(ip) @ extract opcode from rINST 5202 SET_VREG(r0, r9) @ vAA<- r0 5203 GOTO_OPCODE(ip) @ jump to next instruction 5204 /* 11-14 instructions */ 5205 5206 5207 5208/* ------------------------------ */ 5209 .balign 64 5210.L_OP_MUL_FLOAT: /* 0xa8 */ 5211/* File: armv5te/OP_MUL_FLOAT.S */ 5212/* File: armv5te/binop.S */ 5213 /* 5214 * Generic 32-bit binary operation. Provide an "instr" line that 5215 * specifies an instruction that performs "result = r0 op r1". 5216 * This could be an ARM instruction or a function call. (If the result 5217 * comes back in a register other than r0, you can override "result".) 5218 * 5219 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5220 * vCC (r1). Useful for integer division and modulus. Note that we 5221 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5222 * handles it correctly. 5223 * 5224 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5225 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5226 * mul-float, div-float, rem-float 5227 */ 5228 /* binop vAA, vBB, vCC */ 5229 FETCH(r0, 1) @ r0<- CCBB 5230 mov r9, rINST, lsr #8 @ r9<- AA 5231 mov r3, r0, lsr #8 @ r3<- CC 5232 and r2, r0, #255 @ r2<- BB 5233 GET_VREG(r1, r3) @ r1<- vCC 5234 GET_VREG(r0, r2) @ r0<- vBB 5235 .if 0 5236 cmp r1, #0 @ is second operand zero? 5237 beq common_errDivideByZero 5238 .endif 5239 5240 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5241 @ optional op; may set condition codes 5242 bl __aeabi_fmul @ r0<- op, r0-r3 changed 5243 GET_INST_OPCODE(ip) @ extract opcode from rINST 5244 SET_VREG(r0, r9) @ vAA<- r0 5245 GOTO_OPCODE(ip) @ jump to next instruction 5246 /* 11-14 instructions */ 5247 5248 5249 5250/* ------------------------------ */ 5251 .balign 64 5252.L_OP_DIV_FLOAT: /* 0xa9 */ 5253/* File: armv5te/OP_DIV_FLOAT.S */ 5254/* File: armv5te/binop.S */ 5255 /* 5256 * Generic 32-bit binary operation. Provide an "instr" line that 5257 * specifies an instruction that performs "result = r0 op r1". 5258 * This could be an ARM instruction or a function call. (If the result 5259 * comes back in a register other than r0, you can override "result".) 5260 * 5261 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5262 * vCC (r1). Useful for integer division and modulus. Note that we 5263 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5264 * handles it correctly. 5265 * 5266 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5267 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5268 * mul-float, div-float, rem-float 5269 */ 5270 /* binop vAA, vBB, vCC */ 5271 FETCH(r0, 1) @ r0<- CCBB 5272 mov r9, rINST, lsr #8 @ r9<- AA 5273 mov r3, r0, lsr #8 @ r3<- CC 5274 and r2, r0, #255 @ r2<- BB 5275 GET_VREG(r1, r3) @ r1<- vCC 5276 GET_VREG(r0, r2) @ r0<- vBB 5277 .if 0 5278 cmp r1, #0 @ is second operand zero? 
5279 beq common_errDivideByZero 5280 .endif 5281 5282 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5283 @ optional op; may set condition codes 5284 bl __aeabi_fdiv @ r0<- op, r0-r3 changed 5285 GET_INST_OPCODE(ip) @ extract opcode from rINST 5286 SET_VREG(r0, r9) @ vAA<- r0 5287 GOTO_OPCODE(ip) @ jump to next instruction 5288 /* 11-14 instructions */ 5289 5290 5291 5292/* ------------------------------ */ 5293 .balign 64 5294.L_OP_REM_FLOAT: /* 0xaa */ 5295/* File: armv5te/OP_REM_FLOAT.S */ 5296/* EABI doesn't define a float remainder function, but libm does */ 5297/* File: armv5te/binop.S */ 5298 /* 5299 * Generic 32-bit binary operation. Provide an "instr" line that 5300 * specifies an instruction that performs "result = r0 op r1". 5301 * This could be an ARM instruction or a function call. (If the result 5302 * comes back in a register other than r0, you can override "result".) 5303 * 5304 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5305 * vCC (r1). Useful for integer division and modulus. Note that we 5306 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5307 * handles it correctly. 5308 * 5309 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5310 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5311 * mul-float, div-float, rem-float 5312 */ 5313 /* binop vAA, vBB, vCC */ 5314 FETCH(r0, 1) @ r0<- CCBB 5315 mov r9, rINST, lsr #8 @ r9<- AA 5316 mov r3, r0, lsr #8 @ r3<- CC 5317 and r2, r0, #255 @ r2<- BB 5318 GET_VREG(r1, r3) @ r1<- vCC 5319 GET_VREG(r0, r2) @ r0<- vBB 5320 .if 0 5321 cmp r1, #0 @ is second operand zero? 5322 beq common_errDivideByZero 5323 .endif 5324 5325 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5326 @ optional op; may set condition codes 5327 bl fmodf @ r0<- op, r0-r3 changed 5328 GET_INST_OPCODE(ip) @ extract opcode from rINST 5329 SET_VREG(r0, r9) @ vAA<- r0 5330 GOTO_OPCODE(ip) @ jump to next instruction 5331 /* 11-14 instructions */ 5332 5333 5334 5335/* ------------------------------ */ 5336 .balign 64 5337.L_OP_ADD_DOUBLE: /* 0xab */ 5338/* File: armv5te/OP_ADD_DOUBLE.S */ 5339/* File: armv5te/binopWide.S */ 5340 /* 5341 * Generic 64-bit binary operation. Provide an "instr" line that 5342 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5343 * This could be an ARM instruction or a function call. (If the result 5344 * comes back in a register other than r0, you can override "result".) 5345 * 5346 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5347 * vCC (r1). Useful for integer division and modulus. 5348 * 5349 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5350 * xor-long, add-double, sub-double, mul-double, div-double, 5351 * rem-double 5352 * 5353 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5354 */ 5355 /* binop vAA, vBB, vCC */ 5356 FETCH(r0, 1) @ r0<- CCBB 5357 mov r9, rINST, lsr #8 @ r9<- AA 5358 and r2, r0, #255 @ r2<- BB 5359 mov r3, r0, lsr #8 @ r3<- CC 5360 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5361 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5362 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5363 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5364 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5365 .if 0 5366 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
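    /*
     * None of these float/double handlers assume an FPU: the arithmetic is
     * done by the EABI soft-float helpers (__aeabi_fadd, __aeabi_dadd,
     * __aeabi_ddiv, ...). That works because Dalvik registers simply hold
     * the raw IEEE-754 bit patterns, which can be handed to the helpers
     * unchanged. As a rough C picture of what, say, add-float computes
     * (illustration only, not the actual helper):
     *
     *     #include <stdint.h>
     *     #include <string.h>
     *
     *     static uint32_t addFloatBits(uint32_t vBB, uint32_t vCC) {
     *         float a, b, r;
     *         memcpy(&a, &vBB, sizeof a);     // reinterpret vBB bits as float
     *         memcpy(&b, &vCC, sizeof b);     // reinterpret vCC bits as float
     *         r = a + b;                      // the work __aeabi_fadd performs
     *         memcpy(&vBB, &r, sizeof r);
     *         return vBB;                     // bits written back to vAA
     *     }
     */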
5367 beq common_errDivideByZero 5368 .endif 5369 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5370 5371 @ optional op; may set condition codes 5372 bl __aeabi_dadd @ result<- op, r0-r3 changed 5373 GET_INST_OPCODE(ip) @ extract opcode from rINST 5374 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5375 GOTO_OPCODE(ip) @ jump to next instruction 5376 /* 14-17 instructions */ 5377 5378 5379 5380/* ------------------------------ */ 5381 .balign 64 5382.L_OP_SUB_DOUBLE: /* 0xac */ 5383/* File: armv5te/OP_SUB_DOUBLE.S */ 5384/* File: armv5te/binopWide.S */ 5385 /* 5386 * Generic 64-bit binary operation. Provide an "instr" line that 5387 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5388 * This could be an ARM instruction or a function call. (If the result 5389 * comes back in a register other than r0, you can override "result".) 5390 * 5391 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5392 * vCC (r1). Useful for integer division and modulus. 5393 * 5394 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5395 * xor-long, add-double, sub-double, mul-double, div-double, 5396 * rem-double 5397 * 5398 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5399 */ 5400 /* binop vAA, vBB, vCC */ 5401 FETCH(r0, 1) @ r0<- CCBB 5402 mov r9, rINST, lsr #8 @ r9<- AA 5403 and r2, r0, #255 @ r2<- BB 5404 mov r3, r0, lsr #8 @ r3<- CC 5405 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5406 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5407 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5408 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5409 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5410 .if 0 5411 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5412 beq common_errDivideByZero 5413 .endif 5414 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5415 5416 @ optional op; may set condition codes 5417 bl __aeabi_dsub @ result<- op, r0-r3 changed 5418 GET_INST_OPCODE(ip) @ extract opcode from rINST 5419 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5420 GOTO_OPCODE(ip) @ jump to next instruction 5421 /* 14-17 instructions */ 5422 5423 5424 5425/* ------------------------------ */ 5426 .balign 64 5427.L_OP_MUL_DOUBLE: /* 0xad */ 5428/* File: armv5te/OP_MUL_DOUBLE.S */ 5429/* File: armv5te/binopWide.S */ 5430 /* 5431 * Generic 64-bit binary operation. Provide an "instr" line that 5432 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5433 * This could be an ARM instruction or a function call. (If the result 5434 * comes back in a register other than r0, you can override "result".) 5435 * 5436 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5437 * vCC (r1). Useful for integer division and modulus. 5438 * 5439 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5440 * xor-long, add-double, sub-double, mul-double, div-double, 5441 * rem-double 5442 * 5443 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5444 */ 5445 /* binop vAA, vBB, vCC */ 5446 FETCH(r0, 1) @ r0<- CCBB 5447 mov r9, rINST, lsr #8 @ r9<- AA 5448 and r2, r0, #255 @ r2<- BB 5449 mov r3, r0, lsr #8 @ r3<- CC 5450 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5451 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5452 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5453 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5454 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5455 .if 0 5456 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5457 beq common_errDivideByZero 5458 .endif 5459 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5460 5461 @ optional op; may set condition codes 5462 bl __aeabi_dmul @ result<- op, r0-r3 changed 5463 GET_INST_OPCODE(ip) @ extract opcode from rINST 5464 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5465 GOTO_OPCODE(ip) @ jump to next instruction 5466 /* 14-17 instructions */ 5467 5468 5469 5470/* ------------------------------ */ 5471 .balign 64 5472.L_OP_DIV_DOUBLE: /* 0xae */ 5473/* File: armv5te/OP_DIV_DOUBLE.S */ 5474/* File: armv5te/binopWide.S */ 5475 /* 5476 * Generic 64-bit binary operation. Provide an "instr" line that 5477 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5478 * This could be an ARM instruction or a function call. (If the result 5479 * comes back in a register other than r0, you can override "result".) 5480 * 5481 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5482 * vCC (r1). Useful for integer division and modulus. 5483 * 5484 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5485 * xor-long, add-double, sub-double, mul-double, div-double, 5486 * rem-double 5487 * 5488 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5489 */ 5490 /* binop vAA, vBB, vCC */ 5491 FETCH(r0, 1) @ r0<- CCBB 5492 mov r9, rINST, lsr #8 @ r9<- AA 5493 and r2, r0, #255 @ r2<- BB 5494 mov r3, r0, lsr #8 @ r3<- CC 5495 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5496 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5497 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5498 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5499 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5500 .if 0 5501 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5502 beq common_errDivideByZero 5503 .endif 5504 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5505 5506 @ optional op; may set condition codes 5507 bl __aeabi_ddiv @ result<- op, r0-r3 changed 5508 GET_INST_OPCODE(ip) @ extract opcode from rINST 5509 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5510 GOTO_OPCODE(ip) @ jump to next instruction 5511 /* 14-17 instructions */ 5512 5513 5514 5515/* ------------------------------ */ 5516 .balign 64 5517.L_OP_REM_DOUBLE: /* 0xaf */ 5518/* File: armv5te/OP_REM_DOUBLE.S */ 5519/* EABI doesn't define a double remainder function, but libm does */ 5520/* File: armv5te/binopWide.S */ 5521 /* 5522 * Generic 64-bit binary operation. Provide an "instr" line that 5523 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5524 * This could be an ARM instruction or a function call. (If the result 5525 * comes back in a register other than r0, you can override "result".) 5526 * 5527 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5528 * vCC (r1). Useful for integer division and modulus. 5529 * 5530 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5531 * xor-long, add-double, sub-double, mul-double, div-double, 5532 * rem-double 5533 * 5534 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 5535 */ 5536 /* binop vAA, vBB, vCC */ 5537 FETCH(r0, 1) @ r0<- CCBB 5538 mov r9, rINST, lsr #8 @ r9<- AA 5539 and r2, r0, #255 @ r2<- BB 5540 mov r3, r0, lsr #8 @ r3<- CC 5541 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5542 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5543 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5544 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5545 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5546 .if 0 5547 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
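    /*
     * The EABI defines no remainder helper, so rem-float and rem-double
     * fall back on libm's fmodf()/fmod(), as noted above. In C the whole
     * operation is the library call (sketch of the double case):
     *
     *     #include <math.h>
     *
     *     static double remDouble(double vBB, double vCC) {
     *         return fmod(vBB, vCC);   // truncated remainder, sign follows vBB
     *     }
     *
     * There is no divide-by-zero branch for the float/double variants
     * (chkzero stays 0): x % 0.0 simply yields NaN rather than throwing.
     */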
5548 beq common_errDivideByZero 5549 .endif 5550 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5551 5552 @ optional op; may set condition codes 5553 bl fmod @ result<- op, r0-r3 changed 5554 GET_INST_OPCODE(ip) @ extract opcode from rINST 5555 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5556 GOTO_OPCODE(ip) @ jump to next instruction 5557 /* 14-17 instructions */ 5558 5559 5560 5561/* ------------------------------ */ 5562 .balign 64 5563.L_OP_ADD_INT_2ADDR: /* 0xb0 */ 5564/* File: armv5te/OP_ADD_INT_2ADDR.S */ 5565/* File: armv5te/binop2addr.S */ 5566 /* 5567 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5568 * that specifies an instruction that performs "result = r0 op r1". 5569 * This could be an ARM instruction or a function call. (If the result 5570 * comes back in a register other than r0, you can override "result".) 5571 * 5572 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5573 * vCC (r1). Useful for integer division and modulus. 5574 * 5575 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5576 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5577 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5578 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5579 */ 5580 /* binop/2addr vA, vB */ 5581 mov r9, rINST, lsr #8 @ r9<- A+ 5582 mov r3, rINST, lsr #12 @ r3<- B 5583 and r9, r9, #15 5584 GET_VREG(r1, r3) @ r1<- vB 5585 GET_VREG(r0, r9) @ r0<- vA 5586 .if 0 5587 cmp r1, #0 @ is second operand zero? 5588 beq common_errDivideByZero 5589 .endif 5590 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5591 5592 @ optional op; may set condition codes 5593 add r0, r0, r1 @ r0<- op, r0-r3 changed 5594 GET_INST_OPCODE(ip) @ extract opcode from rINST 5595 SET_VREG(r0, r9) @ vAA<- r0 5596 GOTO_OPCODE(ip) @ jump to next instruction 5597 /* 10-13 instructions */ 5598 5599 5600 5601/* ------------------------------ */ 5602 .balign 64 5603.L_OP_SUB_INT_2ADDR: /* 0xb1 */ 5604/* File: armv5te/OP_SUB_INT_2ADDR.S */ 5605/* File: armv5te/binop2addr.S */ 5606 /* 5607 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5608 * that specifies an instruction that performs "result = r0 op r1". 5609 * This could be an ARM instruction or a function call. (If the result 5610 * comes back in a register other than r0, you can override "result".) 5611 * 5612 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5613 * vCC (r1). Useful for integer division and modulus. 5614 * 5615 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5616 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5617 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5618 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5619 */ 5620 /* binop/2addr vA, vB */ 5621 mov r9, rINST, lsr #8 @ r9<- A+ 5622 mov r3, rINST, lsr #12 @ r3<- B 5623 and r9, r9, #15 5624 GET_VREG(r1, r3) @ r1<- vB 5625 GET_VREG(r0, r9) @ r0<- vA 5626 .if 0 5627 cmp r1, #0 @ is second operand zero? 
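    /*
     * In the "/2addr" encodings both register numbers live in the upper
     * byte of the single 16-bit code unit: A in bits 8-11 and B in bits
     * 12-15, which is why these handlers extract them with "lsr #8" plus
     * "and #15", and a plain "lsr #12". Roughly, in C (sketch only):
     *
     *     #include <stdint.h>
     *
     *     static void decode2addr(uint16_t inst, unsigned *vA, unsigned *vB) {
     *         *vA = (inst >> 8) & 0x0f;    // "mov r9, rINST, lsr #8" + "and r9, r9, #15"
     *         *vB = inst >> 12;            // "mov r3, rINST, lsr #12"
     *     }
     *
     * vA serves as both a source and the destination, which is what lets
     * the /2addr forms advance the PC by one code unit instead of two.
     */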
5628 beq common_errDivideByZero 5629 .endif 5630 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5631 5632 @ optional op; may set condition codes 5633 sub r0, r0, r1 @ r0<- op, r0-r3 changed 5634 GET_INST_OPCODE(ip) @ extract opcode from rINST 5635 SET_VREG(r0, r9) @ vAA<- r0 5636 GOTO_OPCODE(ip) @ jump to next instruction 5637 /* 10-13 instructions */ 5638 5639 5640 5641/* ------------------------------ */ 5642 .balign 64 5643.L_OP_MUL_INT_2ADDR: /* 0xb2 */ 5644/* File: armv5te/OP_MUL_INT_2ADDR.S */ 5645/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 5646/* File: armv5te/binop2addr.S */ 5647 /* 5648 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5649 * that specifies an instruction that performs "result = r0 op r1". 5650 * This could be an ARM instruction or a function call. (If the result 5651 * comes back in a register other than r0, you can override "result".) 5652 * 5653 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5654 * vCC (r1). Useful for integer division and modulus. 5655 * 5656 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5657 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5658 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5659 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5660 */ 5661 /* binop/2addr vA, vB */ 5662 mov r9, rINST, lsr #8 @ r9<- A+ 5663 mov r3, rINST, lsr #12 @ r3<- B 5664 and r9, r9, #15 5665 GET_VREG(r1, r3) @ r1<- vB 5666 GET_VREG(r0, r9) @ r0<- vA 5667 .if 0 5668 cmp r1, #0 @ is second operand zero? 5669 beq common_errDivideByZero 5670 .endif 5671 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5672 5673 @ optional op; may set condition codes 5674 mul r0, r1, r0 @ r0<- op, r0-r3 changed 5675 GET_INST_OPCODE(ip) @ extract opcode from rINST 5676 SET_VREG(r0, r9) @ vAA<- r0 5677 GOTO_OPCODE(ip) @ jump to next instruction 5678 /* 10-13 instructions */ 5679 5680 5681 5682/* ------------------------------ */ 5683 .balign 64 5684.L_OP_DIV_INT_2ADDR: /* 0xb3 */ 5685/* File: armv5te/OP_DIV_INT_2ADDR.S */ 5686/* File: armv5te/binop2addr.S */ 5687 /* 5688 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5689 * that specifies an instruction that performs "result = r0 op r1". 5690 * This could be an ARM instruction or a function call. (If the result 5691 * comes back in a register other than r0, you can override "result".) 5692 * 5693 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5694 * vCC (r1). Useful for integer division and modulus. 5695 * 5696 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5697 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5698 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5699 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5700 */ 5701 /* binop/2addr vA, vB */ 5702 mov r9, rINST, lsr #8 @ r9<- A+ 5703 mov r3, rINST, lsr #12 @ r3<- B 5704 and r9, r9, #15 5705 GET_VREG(r1, r3) @ r1<- vB 5706 GET_VREG(r0, r9) @ r0<- vA 5707 .if 1 5708 cmp r1, #0 @ is second operand zero? 
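    /*
     * The cmp/beq pair here is the 32-bit divide-by-zero check; only the
     * div and rem variants assemble it (chkzero=1). With a nonzero divisor
     * the handler calls __aeabi_idiv (rem-int uses __aeabi_idivmod, which
     * returns the quotient in r0 and the remainder in r1). In C terms,
     * roughly (sketch only):
     *
     *     static int divInt2addr(int vA, int vB) { return vA / vB; }
     *     static int remInt2addr(int vA, int vB) { return vA % vB; }
     *
     * The INT_MIN / -1 case gets no special handling here; as the binop
     * comments note, the ARM math library is trusted to do the right thing
     * (Dalvik expects INT_MIN, where plain C would be undefined).
     */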
5709 beq common_errDivideByZero 5710 .endif 5711 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5712 5713 @ optional op; may set condition codes 5714 bl __aeabi_idiv @ r0<- op, r0-r3 changed 5715 GET_INST_OPCODE(ip) @ extract opcode from rINST 5716 SET_VREG(r0, r9) @ vAA<- r0 5717 GOTO_OPCODE(ip) @ jump to next instruction 5718 /* 10-13 instructions */ 5719 5720 5721 5722/* ------------------------------ */ 5723 .balign 64 5724.L_OP_REM_INT_2ADDR: /* 0xb4 */ 5725/* File: armv5te/OP_REM_INT_2ADDR.S */ 5726/* idivmod returns quotient in r0 and remainder in r1 */ 5727/* File: armv5te/binop2addr.S */ 5728 /* 5729 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5730 * that specifies an instruction that performs "result = r0 op r1". 5731 * This could be an ARM instruction or a function call. (If the result 5732 * comes back in a register other than r0, you can override "result".) 5733 * 5734 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5735 * vCC (r1). Useful for integer division and modulus. 5736 * 5737 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5738 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5739 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5740 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5741 */ 5742 /* binop/2addr vA, vB */ 5743 mov r9, rINST, lsr #8 @ r9<- A+ 5744 mov r3, rINST, lsr #12 @ r3<- B 5745 and r9, r9, #15 5746 GET_VREG(r1, r3) @ r1<- vB 5747 GET_VREG(r0, r9) @ r0<- vA 5748 .if 1 5749 cmp r1, #0 @ is second operand zero? 5750 beq common_errDivideByZero 5751 .endif 5752 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5753 5754 @ optional op; may set condition codes 5755 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 5756 GET_INST_OPCODE(ip) @ extract opcode from rINST 5757 SET_VREG(r1, r9) @ vAA<- r1 5758 GOTO_OPCODE(ip) @ jump to next instruction 5759 /* 10-13 instructions */ 5760 5761 5762 5763/* ------------------------------ */ 5764 .balign 64 5765.L_OP_AND_INT_2ADDR: /* 0xb5 */ 5766/* File: armv5te/OP_AND_INT_2ADDR.S */ 5767/* File: armv5te/binop2addr.S */ 5768 /* 5769 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5770 * that specifies an instruction that performs "result = r0 op r1". 5771 * This could be an ARM instruction or a function call. (If the result 5772 * comes back in a register other than r0, you can override "result".) 5773 * 5774 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5775 * vCC (r1). Useful for integer division and modulus. 5776 * 5777 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5778 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5779 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5780 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5781 */ 5782 /* binop/2addr vA, vB */ 5783 mov r9, rINST, lsr #8 @ r9<- A+ 5784 mov r3, rINST, lsr #12 @ r3<- B 5785 and r9, r9, #15 5786 GET_VREG(r1, r3) @ r1<- vB 5787 GET_VREG(r0, r9) @ r0<- vA 5788 .if 0 5789 cmp r1, #0 @ is second operand zero? 
5790 beq common_errDivideByZero 5791 .endif 5792 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5793 5794 @ optional op; may set condition codes 5795 and r0, r0, r1 @ r0<- op, r0-r3 changed 5796 GET_INST_OPCODE(ip) @ extract opcode from rINST 5797 SET_VREG(r0, r9) @ vAA<- r0 5798 GOTO_OPCODE(ip) @ jump to next instruction 5799 /* 10-13 instructions */ 5800 5801 5802 5803/* ------------------------------ */ 5804 .balign 64 5805.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5806/* File: armv5te/OP_OR_INT_2ADDR.S */ 5807/* File: armv5te/binop2addr.S */ 5808 /* 5809 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5810 * that specifies an instruction that performs "result = r0 op r1". 5811 * This could be an ARM instruction or a function call. (If the result 5812 * comes back in a register other than r0, you can override "result".) 5813 * 5814 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5815 * vCC (r1). Useful for integer division and modulus. 5816 * 5817 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5818 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5819 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5820 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5821 */ 5822 /* binop/2addr vA, vB */ 5823 mov r9, rINST, lsr #8 @ r9<- A+ 5824 mov r3, rINST, lsr #12 @ r3<- B 5825 and r9, r9, #15 5826 GET_VREG(r1, r3) @ r1<- vB 5827 GET_VREG(r0, r9) @ r0<- vA 5828 .if 0 5829 cmp r1, #0 @ is second operand zero? 5830 beq common_errDivideByZero 5831 .endif 5832 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5833 5834 @ optional op; may set condition codes 5835 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5836 GET_INST_OPCODE(ip) @ extract opcode from rINST 5837 SET_VREG(r0, r9) @ vAA<- r0 5838 GOTO_OPCODE(ip) @ jump to next instruction 5839 /* 10-13 instructions */ 5840 5841 5842 5843/* ------------------------------ */ 5844 .balign 64 5845.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5846/* File: armv5te/OP_XOR_INT_2ADDR.S */ 5847/* File: armv5te/binop2addr.S */ 5848 /* 5849 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5850 * that specifies an instruction that performs "result = r0 op r1". 5851 * This could be an ARM instruction or a function call. (If the result 5852 * comes back in a register other than r0, you can override "result".) 5853 * 5854 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5855 * vCC (r1). Useful for integer division and modulus. 5856 * 5857 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5858 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5859 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5860 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5861 */ 5862 /* binop/2addr vA, vB */ 5863 mov r9, rINST, lsr #8 @ r9<- A+ 5864 mov r3, rINST, lsr #12 @ r3<- B 5865 and r9, r9, #15 5866 GET_VREG(r1, r3) @ r1<- vB 5867 GET_VREG(r0, r9) @ r0<- vA 5868 .if 0 5869 cmp r1, #0 @ is second operand zero? 
5870 beq common_errDivideByZero 5871 .endif 5872 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5873 5874 @ optional op; may set condition codes 5875 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5876 GET_INST_OPCODE(ip) @ extract opcode from rINST 5877 SET_VREG(r0, r9) @ vAA<- r0 5878 GOTO_OPCODE(ip) @ jump to next instruction 5879 /* 10-13 instructions */ 5880 5881 5882 5883/* ------------------------------ */ 5884 .balign 64 5885.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5886/* File: armv5te/OP_SHL_INT_2ADDR.S */ 5887/* File: armv5te/binop2addr.S */ 5888 /* 5889 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5890 * that specifies an instruction that performs "result = r0 op r1". 5891 * This could be an ARM instruction or a function call. (If the result 5892 * comes back in a register other than r0, you can override "result".) 5893 * 5894 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5895 * vCC (r1). Useful for integer division and modulus. 5896 * 5897 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5898 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5899 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5900 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5901 */ 5902 /* binop/2addr vA, vB */ 5903 mov r9, rINST, lsr #8 @ r9<- A+ 5904 mov r3, rINST, lsr #12 @ r3<- B 5905 and r9, r9, #15 5906 GET_VREG(r1, r3) @ r1<- vB 5907 GET_VREG(r0, r9) @ r0<- vA 5908 .if 0 5909 cmp r1, #0 @ is second operand zero? 5910 beq common_errDivideByZero 5911 .endif 5912 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5913 5914 and r1, r1, #31 @ optional op; may set condition codes 5915 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5916 GET_INST_OPCODE(ip) @ extract opcode from rINST 5917 SET_VREG(r0, r9) @ vAA<- r0 5918 GOTO_OPCODE(ip) @ jump to next instruction 5919 /* 10-13 instructions */ 5920 5921 5922 5923/* ------------------------------ */ 5924 .balign 64 5925.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5926/* File: armv5te/OP_SHR_INT_2ADDR.S */ 5927/* File: armv5te/binop2addr.S */ 5928 /* 5929 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5930 * that specifies an instruction that performs "result = r0 op r1". 5931 * This could be an ARM instruction or a function call. (If the result 5932 * comes back in a register other than r0, you can override "result".) 5933 * 5934 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5935 * vCC (r1). Useful for integer division and modulus. 5936 * 5937 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5938 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5939 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5940 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5941 */ 5942 /* binop/2addr vA, vB */ 5943 mov r9, rINST, lsr #8 @ r9<- A+ 5944 mov r3, rINST, lsr #12 @ r3<- B 5945 and r9, r9, #15 5946 GET_VREG(r1, r3) @ r1<- vB 5947 GET_VREG(r0, r9) @ r0<- vA 5948 .if 0 5949 cmp r1, #0 @ is second operand zero? 
5950 beq common_errDivideByZero 5951 .endif 5952 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5953 5954 and r1, r1, #31 @ optional op; may set condition codes 5955 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5956 GET_INST_OPCODE(ip) @ extract opcode from rINST 5957 SET_VREG(r0, r9) @ vAA<- r0 5958 GOTO_OPCODE(ip) @ jump to next instruction 5959 /* 10-13 instructions */ 5960 5961 5962 5963/* ------------------------------ */ 5964 .balign 64 5965.L_OP_USHR_INT_2ADDR: /* 0xba */ 5966/* File: armv5te/OP_USHR_INT_2ADDR.S */ 5967/* File: armv5te/binop2addr.S */ 5968 /* 5969 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5970 * that specifies an instruction that performs "result = r0 op r1". 5971 * This could be an ARM instruction or a function call. (If the result 5972 * comes back in a register other than r0, you can override "result".) 5973 * 5974 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5975 * vCC (r1). Useful for integer division and modulus. 5976 * 5977 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5978 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5979 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5980 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5981 */ 5982 /* binop/2addr vA, vB */ 5983 mov r9, rINST, lsr #8 @ r9<- A+ 5984 mov r3, rINST, lsr #12 @ r3<- B 5985 and r9, r9, #15 5986 GET_VREG(r1, r3) @ r1<- vB 5987 GET_VREG(r0, r9) @ r0<- vA 5988 .if 0 5989 cmp r1, #0 @ is second operand zero? 5990 beq common_errDivideByZero 5991 .endif 5992 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5993 5994 and r1, r1, #31 @ optional op; may set condition codes 5995 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5996 GET_INST_OPCODE(ip) @ extract opcode from rINST 5997 SET_VREG(r0, r9) @ vAA<- r0 5998 GOTO_OPCODE(ip) @ jump to next instruction 5999 /* 10-13 instructions */ 6000 6001 6002 6003/* ------------------------------ */ 6004 .balign 64 6005.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 6006/* File: armv5te/OP_ADD_LONG_2ADDR.S */ 6007/* File: armv5te/binopWide2addr.S */ 6008 /* 6009 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6010 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6011 * This could be an ARM instruction or a function call. (If the result 6012 * comes back in a register other than r0, you can override "result".) 6013 * 6014 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6015 * vCC (r1). Useful for integer division and modulus. 6016 * 6017 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6018 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6019 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6020 * rem-double/2addr 6021 */ 6022 /* binop/2addr vA, vB */ 6023 mov r9, rINST, lsr #8 @ r9<- A+ 6024 mov r1, rINST, lsr #12 @ r1<- B 6025 and r9, r9, #15 6026 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6027 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6028 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6029 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6030 .if 0 6031 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6032 beq common_errDivideByZero 6033 .endif 6034 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6035 6036 adds r0, r0, r2 @ optional op; may set condition codes 6037 adc r1, r1, r3 @ result<- op, r0-r3 changed 6038 GET_INST_OPCODE(ip) @ extract opcode from rINST 6039 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6040 GOTO_OPCODE(ip) @ jump to next instruction 6041 /* 12-15 instructions */ 6042 6043 6044 6045/* ------------------------------ */ 6046 .balign 64 6047.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 6048/* File: armv5te/OP_SUB_LONG_2ADDR.S */ 6049/* File: armv5te/binopWide2addr.S */ 6050 /* 6051 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6052 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6053 * This could be an ARM instruction or a function call. (If the result 6054 * comes back in a register other than r0, you can override "result".) 6055 * 6056 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6057 * vCC (r1). Useful for integer division and modulus. 6058 * 6059 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6060 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6061 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6062 * rem-double/2addr 6063 */ 6064 /* binop/2addr vA, vB */ 6065 mov r9, rINST, lsr #8 @ r9<- A+ 6066 mov r1, rINST, lsr #12 @ r1<- B 6067 and r9, r9, #15 6068 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6069 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6070 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6071 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6072 .if 0 6073 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6074 beq common_errDivideByZero 6075 .endif 6076 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6077 6078 subs r0, r0, r2 @ optional op; may set condition codes 6079 sbc r1, r1, r3 @ result<- op, r0-r3 changed 6080 GET_INST_OPCODE(ip) @ extract opcode from rINST 6081 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6082 GOTO_OPCODE(ip) @ jump to next instruction 6083 /* 12-15 instructions */ 6084 6085 6086 6087/* ------------------------------ */ 6088 .balign 64 6089.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 6090/* File: armv5te/OP_MUL_LONG_2ADDR.S */ 6091 /* 6092 * Signed 64-bit integer multiply, "/2addr" version. 6093 * 6094 * See OP_MUL_LONG for an explanation. 6095 * 6096 * We get a little tight on registers, so to avoid looking up &fp[A] 6097 * again we stuff it into rINST. 6098 */ 6099 /* mul-long/2addr vA, vB */ 6100 mov r9, rINST, lsr #8 @ r9<- A+ 6101 mov r1, rINST, lsr #12 @ r1<- B 6102 and r9, r9, #15 6103 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6104 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 6105 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6106 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 6107 mul ip, r2, r1 @ ip<- ZxW 6108 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 6109 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 6110 mov r0, rINST @ r0<- &fp[A] (free up rINST) 6111 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6112 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 6113 GET_INST_OPCODE(ip) @ extract opcode from rINST 6114 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 6115 GOTO_OPCODE(ip) @ jump to next instruction 6116 6117 6118/* ------------------------------ */ 6119 .balign 64 6120.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 6121/* File: armv5te/OP_DIV_LONG_2ADDR.S */ 6122/* File: armv5te/binopWide2addr.S */ 6123 /* 6124 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6125 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6126 * This could be an ARM instruction or a function call. 
(If the result 6127 * comes back in a register other than r0, you can override "result".) 6128 * 6129 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6130 * vCC (r1). Useful for integer division and modulus. 6131 * 6132 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6133 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6134 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6135 * rem-double/2addr 6136 */ 6137 /* binop/2addr vA, vB */ 6138 mov r9, rINST, lsr #8 @ r9<- A+ 6139 mov r1, rINST, lsr #12 @ r1<- B 6140 and r9, r9, #15 6141 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6142 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6143 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6144 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6145 .if 1 6146 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6147 beq common_errDivideByZero 6148 .endif 6149 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6150 6151 @ optional op; may set condition codes 6152 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 6153 GET_INST_OPCODE(ip) @ extract opcode from rINST 6154 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6155 GOTO_OPCODE(ip) @ jump to next instruction 6156 /* 12-15 instructions */ 6157 6158 6159 6160/* ------------------------------ */ 6161 .balign 64 6162.L_OP_REM_LONG_2ADDR: /* 0xbf */ 6163/* File: armv5te/OP_REM_LONG_2ADDR.S */ 6164/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 6165/* File: armv5te/binopWide2addr.S */ 6166 /* 6167 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6168 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6169 * This could be an ARM instruction or a function call. (If the result 6170 * comes back in a register other than r0, you can override "result".) 6171 * 6172 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6173 * vCC (r1). Useful for integer division and modulus. 6174 * 6175 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6176 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6177 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6178 * rem-double/2addr 6179 */ 6180 /* binop/2addr vA, vB */ 6181 mov r9, rINST, lsr #8 @ r9<- A+ 6182 mov r1, rINST, lsr #12 @ r1<- B 6183 and r9, r9, #15 6184 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6185 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6186 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6187 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6188 .if 1 6189 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6190 beq common_errDivideByZero 6191 .endif 6192 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6193 6194 @ optional op; may set condition codes 6195 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 6196 GET_INST_OPCODE(ip) @ extract opcode from rINST 6197 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 6198 GOTO_OPCODE(ip) @ jump to next instruction 6199 /* 12-15 instructions */ 6200 6201 6202 6203/* ------------------------------ */ 6204 .balign 64 6205.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 6206/* File: armv5te/OP_AND_LONG_2ADDR.S */ 6207/* File: armv5te/binopWide2addr.S */ 6208 /* 6209 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6210 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6211 * This could be an ARM instruction or a function call. (If the result 6212 * comes back in a register other than r0, you can override "result".) 6213 * 6214 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6215 * vCC (r1). Useful for integer division and modulus. 
6216 * 6217 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6218 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6219 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6220 * rem-double/2addr 6221 */ 6222 /* binop/2addr vA, vB */ 6223 mov r9, rINST, lsr #8 @ r9<- A+ 6224 mov r1, rINST, lsr #12 @ r1<- B 6225 and r9, r9, #15 6226 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6227 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6228 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6229 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6230 .if 0 6231 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6232 beq common_errDivideByZero 6233 .endif 6234 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6235 6236 and r0, r0, r2 @ optional op; may set condition codes 6237 and r1, r1, r3 @ result<- op, r0-r3 changed 6238 GET_INST_OPCODE(ip) @ extract opcode from rINST 6239 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6240 GOTO_OPCODE(ip) @ jump to next instruction 6241 /* 12-15 instructions */ 6242 6243 6244 6245/* ------------------------------ */ 6246 .balign 64 6247.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 6248/* File: armv5te/OP_OR_LONG_2ADDR.S */ 6249/* File: armv5te/binopWide2addr.S */ 6250 /* 6251 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6252 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6253 * This could be an ARM instruction or a function call. (If the result 6254 * comes back in a register other than r0, you can override "result".) 6255 * 6256 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6257 * vCC (r1). Useful for integer division and modulus. 6258 * 6259 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6260 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6261 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6262 * rem-double/2addr 6263 */ 6264 /* binop/2addr vA, vB */ 6265 mov r9, rINST, lsr #8 @ r9<- A+ 6266 mov r1, rINST, lsr #12 @ r1<- B 6267 and r9, r9, #15 6268 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6269 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6270 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6271 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6272 .if 0 6273 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6274 beq common_errDivideByZero 6275 .endif 6276 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6277 6278 orr r0, r0, r2 @ optional op; may set condition codes 6279 orr r1, r1, r3 @ result<- op, r0-r3 changed 6280 GET_INST_OPCODE(ip) @ extract opcode from rINST 6281 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6282 GOTO_OPCODE(ip) @ jump to next instruction 6283 /* 12-15 instructions */ 6284 6285 6286 6287/* ------------------------------ */ 6288 .balign 64 6289.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 6290/* File: armv5te/OP_XOR_LONG_2ADDR.S */ 6291/* File: armv5te/binopWide2addr.S */ 6292 /* 6293 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6294 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6295 * This could be an ARM instruction or a function call. (If the result 6296 * comes back in a register other than r0, you can override "result".) 6297 * 6298 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6299 * vCC (r1). Useful for integer division and modulus. 
6300 * 6301 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6302 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6303 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6304 * rem-double/2addr 6305 */ 6306 /* binop/2addr vA, vB */ 6307 mov r9, rINST, lsr #8 @ r9<- A+ 6308 mov r1, rINST, lsr #12 @ r1<- B 6309 and r9, r9, #15 6310 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6311 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6312 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6313 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6314 .if 0 6315 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6316 beq common_errDivideByZero 6317 .endif 6318 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6319 6320 eor r0, r0, r2 @ optional op; may set condition codes 6321 eor r1, r1, r3 @ result<- op, r0-r3 changed 6322 GET_INST_OPCODE(ip) @ extract opcode from rINST 6323 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6324 GOTO_OPCODE(ip) @ jump to next instruction 6325 /* 12-15 instructions */ 6326 6327 6328 6329/* ------------------------------ */ 6330 .balign 64 6331.L_OP_SHL_LONG_2ADDR: /* 0xc3 */ 6332/* File: armv5te/OP_SHL_LONG_2ADDR.S */ 6333 /* 6334 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6335 * 32-bit shift distance. 6336 */ 6337 /* shl-long/2addr vA, vB */ 6338 mov r9, rINST, lsr #8 @ r9<- A+ 6339 mov r3, rINST, lsr #12 @ r3<- B 6340 and r9, r9, #15 6341 GET_VREG(r2, r3) @ r2<- vB 6342 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6343 and r2, r2, #63 @ r2<- r2 & 0x3f 6344 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6345 6346 mov r1, r1, asl r2 @ r1<- r1 << r2 6347 rsb r3, r2, #32 @ r3<- 32 - r2 6348 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 6349 subs ip, r2, #32 @ ip<- r2 - 32 6350 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6351 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 6352 mov r0, r0, asl r2 @ r0<- r0 << r2 6353 b .LOP_SHL_LONG_2ADDR_finish 6354 6355/* ------------------------------ */ 6356 .balign 64 6357.L_OP_SHR_LONG_2ADDR: /* 0xc4 */ 6358/* File: armv5te/OP_SHR_LONG_2ADDR.S */ 6359 /* 6360 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6361 * 32-bit shift distance. 6362 */ 6363 /* shr-long/2addr vA, vB */ 6364 mov r9, rINST, lsr #8 @ r9<- A+ 6365 mov r3, rINST, lsr #12 @ r3<- B 6366 and r9, r9, #15 6367 GET_VREG(r2, r3) @ r2<- vB 6368 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6369 and r2, r2, #63 @ r2<- r2 & 0x3f 6370 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6371 6372 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6373 rsb r3, r2, #32 @ r3<- 32 - r2 6374 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6375 subs ip, r2, #32 @ ip<- r2 - 32 6376 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6377 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 6378 mov r1, r1, asr r2 @ r1<- r1 >> r2 6379 b .LOP_SHR_LONG_2ADDR_finish 6380 6381/* ------------------------------ */ 6382 .balign 64 6383.L_OP_USHR_LONG_2ADDR: /* 0xc5 */ 6384/* File: armv5te/OP_USHR_LONG_2ADDR.S */ 6385 /* 6386 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6387 * 32-bit shift distance. 
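 * The 64-bit shift is built from 32-bit register-specified shifts: the two
 * words are combined with a (32 - n) cross-shift, and for distances of 32 or
 * more the "movpl" (predicated on "subs ip, r2, #32") substitutes the single
 * remaining shift.  Roughly, in C (illustrative sketch only; it leans on the
 * ARM rule that a register-specified shift by 32 or more yields zero):
 *
 *   n = vB & 63;
 *   rlo = (lo >> n) | (hi << (32 - n));    // low word from both halves
 *   if (n >= 32) rlo = hi >> (n - 32);     // movpl path
 *   rhi = hi >> n;                         // logical shift for ushr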
6388 */ 6389 /* ushr-long/2addr vA, vB */ 6390 mov r9, rINST, lsr #8 @ r9<- A+ 6391 mov r3, rINST, lsr #12 @ r3<- B 6392 and r9, r9, #15 6393 GET_VREG(r2, r3) @ r2<- vB 6394 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6395 and r2, r2, #63 @ r2<- r2 & 0x3f 6396 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6397 6398 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6399 rsb r3, r2, #32 @ r3<- 32 - r2 6400 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6401 subs ip, r2, #32 @ ip<- r2 - 32 6402 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6403 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 6404 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 6405 b .LOP_USHR_LONG_2ADDR_finish 6406 6407/* ------------------------------ */ 6408 .balign 64 6409.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */ 6410/* File: armv5te/OP_ADD_FLOAT_2ADDR.S */ 6411/* File: armv5te/binop2addr.S */ 6412 /* 6413 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6414 * that specifies an instruction that performs "result = r0 op r1". 6415 * This could be an ARM instruction or a function call. (If the result 6416 * comes back in a register other than r0, you can override "result".) 6417 * 6418 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6419 * vCC (r1). Useful for integer division and modulus. 6420 * 6421 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6422 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6423 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6424 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6425 */ 6426 /* binop/2addr vA, vB */ 6427 mov r9, rINST, lsr #8 @ r9<- A+ 6428 mov r3, rINST, lsr #12 @ r3<- B 6429 and r9, r9, #15 6430 GET_VREG(r1, r3) @ r1<- vB 6431 GET_VREG(r0, r9) @ r0<- vA 6432 .if 0 6433 cmp r1, #0 @ is second operand zero? 6434 beq common_errDivideByZero 6435 .endif 6436 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6437 6438 @ optional op; may set condition codes 6439 bl __aeabi_fadd @ r0<- op, r0-r3 changed 6440 GET_INST_OPCODE(ip) @ extract opcode from rINST 6441 SET_VREG(r0, r9) @ vAA<- r0 6442 GOTO_OPCODE(ip) @ jump to next instruction 6443 /* 10-13 instructions */ 6444 6445 6446 6447/* ------------------------------ */ 6448 .balign 64 6449.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */ 6450/* File: armv5te/OP_SUB_FLOAT_2ADDR.S */ 6451/* File: armv5te/binop2addr.S */ 6452 /* 6453 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6454 * that specifies an instruction that performs "result = r0 op r1". 6455 * This could be an ARM instruction or a function call. (If the result 6456 * comes back in a register other than r0, you can override "result".) 6457 * 6458 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6459 * vCC (r1). Useful for integer division and modulus. 6460 * 6461 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6462 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6463 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6464 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6465 */ 6466 /* binop/2addr vA, vB */ 6467 mov r9, rINST, lsr #8 @ r9<- A+ 6468 mov r3, rINST, lsr #12 @ r3<- B 6469 and r9, r9, #15 6470 GET_VREG(r1, r3) @ r1<- vB 6471 GET_VREG(r0, r9) @ r0<- vA 6472 .if 0 6473 cmp r1, #0 @ is second operand zero? 
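    @ (not assembled: chkzero is 0 for the float ops, since IEEE float
    @ division by zero yields an infinity or NaN rather than a Dalvik
    @ exception; the arithmetic itself goes through the EABI soft-float
    @ helpers, e.g. __aeabi_fsub below, rather than VFP)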
6474 beq common_errDivideByZero 6475 .endif 6476 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6477 6478 @ optional op; may set condition codes 6479 bl __aeabi_fsub @ r0<- op, r0-r3 changed 6480 GET_INST_OPCODE(ip) @ extract opcode from rINST 6481 SET_VREG(r0, r9) @ vAA<- r0 6482 GOTO_OPCODE(ip) @ jump to next instruction 6483 /* 10-13 instructions */ 6484 6485 6486 6487/* ------------------------------ */ 6488 .balign 64 6489.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */ 6490/* File: armv5te/OP_MUL_FLOAT_2ADDR.S */ 6491/* File: armv5te/binop2addr.S */ 6492 /* 6493 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6494 * that specifies an instruction that performs "result = r0 op r1". 6495 * This could be an ARM instruction or a function call. (If the result 6496 * comes back in a register other than r0, you can override "result".) 6497 * 6498 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6499 * vCC (r1). Useful for integer division and modulus. 6500 * 6501 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6502 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6503 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6504 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6505 */ 6506 /* binop/2addr vA, vB */ 6507 mov r9, rINST, lsr #8 @ r9<- A+ 6508 mov r3, rINST, lsr #12 @ r3<- B 6509 and r9, r9, #15 6510 GET_VREG(r1, r3) @ r1<- vB 6511 GET_VREG(r0, r9) @ r0<- vA 6512 .if 0 6513 cmp r1, #0 @ is second operand zero? 6514 beq common_errDivideByZero 6515 .endif 6516 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6517 6518 @ optional op; may set condition codes 6519 bl __aeabi_fmul @ r0<- op, r0-r3 changed 6520 GET_INST_OPCODE(ip) @ extract opcode from rINST 6521 SET_VREG(r0, r9) @ vAA<- r0 6522 GOTO_OPCODE(ip) @ jump to next instruction 6523 /* 10-13 instructions */ 6524 6525 6526 6527/* ------------------------------ */ 6528 .balign 64 6529.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */ 6530/* File: armv5te/OP_DIV_FLOAT_2ADDR.S */ 6531/* File: armv5te/binop2addr.S */ 6532 /* 6533 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6534 * that specifies an instruction that performs "result = r0 op r1". 6535 * This could be an ARM instruction or a function call. (If the result 6536 * comes back in a register other than r0, you can override "result".) 6537 * 6538 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6539 * vCC (r1). Useful for integer division and modulus. 6540 * 6541 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6542 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6543 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6544 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6545 */ 6546 /* binop/2addr vA, vB */ 6547 mov r9, rINST, lsr #8 @ r9<- A+ 6548 mov r3, rINST, lsr #12 @ r3<- B 6549 and r9, r9, #15 6550 GET_VREG(r1, r3) @ r1<- vB 6551 GET_VREG(r0, r9) @ r0<- vA 6552 .if 0 6553 cmp r1, #0 @ is second operand zero? 
6554 beq common_errDivideByZero 6555 .endif 6556 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6557 6558 @ optional op; may set condition codes 6559 bl __aeabi_fdiv @ r0<- op, r0-r3 changed 6560 GET_INST_OPCODE(ip) @ extract opcode from rINST 6561 SET_VREG(r0, r9) @ vAA<- r0 6562 GOTO_OPCODE(ip) @ jump to next instruction 6563 /* 10-13 instructions */ 6564 6565 6566 6567/* ------------------------------ */ 6568 .balign 64 6569.L_OP_REM_FLOAT_2ADDR: /* 0xca */ 6570/* File: armv5te/OP_REM_FLOAT_2ADDR.S */ 6571/* EABI doesn't define a float remainder function, but libm does */ 6572/* File: armv5te/binop2addr.S */ 6573 /* 6574 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6575 * that specifies an instruction that performs "result = r0 op r1". 6576 * This could be an ARM instruction or a function call. (If the result 6577 * comes back in a register other than r0, you can override "result".) 6578 * 6579 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6580 * vCC (r1). Useful for integer division and modulus. 6581 * 6582 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6583 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6584 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6585 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6586 */ 6587 /* binop/2addr vA, vB */ 6588 mov r9, rINST, lsr #8 @ r9<- A+ 6589 mov r3, rINST, lsr #12 @ r3<- B 6590 and r9, r9, #15 6591 GET_VREG(r1, r3) @ r1<- vB 6592 GET_VREG(r0, r9) @ r0<- vA 6593 .if 0 6594 cmp r1, #0 @ is second operand zero? 6595 beq common_errDivideByZero 6596 .endif 6597 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6598 6599 @ optional op; may set condition codes 6600 bl fmodf @ r0<- op, r0-r3 changed 6601 GET_INST_OPCODE(ip) @ extract opcode from rINST 6602 SET_VREG(r0, r9) @ vAA<- r0 6603 GOTO_OPCODE(ip) @ jump to next instruction 6604 /* 10-13 instructions */ 6605 6606 6607 6608/* ------------------------------ */ 6609 .balign 64 6610.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */ 6611/* File: armv5te/OP_ADD_DOUBLE_2ADDR.S */ 6612/* File: armv5te/binopWide2addr.S */ 6613 /* 6614 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6615 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6616 * This could be an ARM instruction or a function call. (If the result 6617 * comes back in a register other than r0, you can override "result".) 6618 * 6619 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6620 * vCC (r1). Useful for integer division and modulus. 6621 * 6622 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6623 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6624 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6625 * rem-double/2addr 6626 */ 6627 /* binop/2addr vA, vB */ 6628 mov r9, rINST, lsr #8 @ r9<- A+ 6629 mov r1, rINST, lsr #12 @ r1<- B 6630 and r9, r9, #15 6631 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6632 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6633 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6634 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6635 .if 0 6636 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
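    @ (not assembled: chkzero is 0 here; per the soft-float EABI the two
    @ double operands are already in r0/r1 and r2/r3, so __aeabi_dadd below
    @ can be called directly and returns its result in r0/r1)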
6637 beq common_errDivideByZero 6638 .endif 6639 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6640 6641 @ optional op; may set condition codes 6642 bl __aeabi_dadd @ result<- op, r0-r3 changed 6643 GET_INST_OPCODE(ip) @ extract opcode from rINST 6644 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6645 GOTO_OPCODE(ip) @ jump to next instruction 6646 /* 12-15 instructions */ 6647 6648 6649 6650/* ------------------------------ */ 6651 .balign 64 6652.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */ 6653/* File: armv5te/OP_SUB_DOUBLE_2ADDR.S */ 6654/* File: armv5te/binopWide2addr.S */ 6655 /* 6656 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6657 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6658 * This could be an ARM instruction or a function call. (If the result 6659 * comes back in a register other than r0, you can override "result".) 6660 * 6661 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6662 * vCC (r1). Useful for integer division and modulus. 6663 * 6664 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6665 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6666 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6667 * rem-double/2addr 6668 */ 6669 /* binop/2addr vA, vB */ 6670 mov r9, rINST, lsr #8 @ r9<- A+ 6671 mov r1, rINST, lsr #12 @ r1<- B 6672 and r9, r9, #15 6673 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6674 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6675 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6676 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6677 .if 0 6678 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6679 beq common_errDivideByZero 6680 .endif 6681 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6682 6683 @ optional op; may set condition codes 6684 bl __aeabi_dsub @ result<- op, r0-r3 changed 6685 GET_INST_OPCODE(ip) @ extract opcode from rINST 6686 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6687 GOTO_OPCODE(ip) @ jump to next instruction 6688 /* 12-15 instructions */ 6689 6690 6691 6692/* ------------------------------ */ 6693 .balign 64 6694.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */ 6695/* File: armv5te/OP_MUL_DOUBLE_2ADDR.S */ 6696/* File: armv5te/binopWide2addr.S */ 6697 /* 6698 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6699 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6700 * This could be an ARM instruction or a function call. (If the result 6701 * comes back in a register other than r0, you can override "result".) 6702 * 6703 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6704 * vCC (r1). Useful for integer division and modulus. 6705 * 6706 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6707 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6708 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6709 * rem-double/2addr 6710 */ 6711 /* binop/2addr vA, vB */ 6712 mov r9, rINST, lsr #8 @ r9<- A+ 6713 mov r1, rINST, lsr #12 @ r1<- B 6714 and r9, r9, #15 6715 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6716 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6717 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6718 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6719 .if 0 6720 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6721 beq common_errDivideByZero 6722 .endif 6723 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6724 6725 @ optional op; may set condition codes 6726 bl __aeabi_dmul @ result<- op, r0-r3 changed 6727 GET_INST_OPCODE(ip) @ extract opcode from rINST 6728 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6729 GOTO_OPCODE(ip) @ jump to next instruction 6730 /* 12-15 instructions */ 6731 6732 6733 6734/* ------------------------------ */ 6735 .balign 64 6736.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */ 6737/* File: armv5te/OP_DIV_DOUBLE_2ADDR.S */ 6738/* File: armv5te/binopWide2addr.S */ 6739 /* 6740 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6741 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6742 * This could be an ARM instruction or a function call. (If the result 6743 * comes back in a register other than r0, you can override "result".) 6744 * 6745 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6746 * vCC (r1). Useful for integer division and modulus. 6747 * 6748 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6749 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6750 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6751 * rem-double/2addr 6752 */ 6753 /* binop/2addr vA, vB */ 6754 mov r9, rINST, lsr #8 @ r9<- A+ 6755 mov r1, rINST, lsr #12 @ r1<- B 6756 and r9, r9, #15 6757 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6758 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6759 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6760 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6761 .if 0 6762 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6763 beq common_errDivideByZero 6764 .endif 6765 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6766 6767 @ optional op; may set condition codes 6768 bl __aeabi_ddiv @ result<- op, r0-r3 changed 6769 GET_INST_OPCODE(ip) @ extract opcode from rINST 6770 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6771 GOTO_OPCODE(ip) @ jump to next instruction 6772 /* 12-15 instructions */ 6773 6774 6775 6776/* ------------------------------ */ 6777 .balign 64 6778.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */ 6779/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */ 6780/* EABI doesn't define a double remainder function, but libm does */ 6781/* File: armv5te/binopWide2addr.S */ 6782 /* 6783 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6784 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6785 * This could be an ARM instruction or a function call. (If the result 6786 * comes back in a register other than r0, you can override "result".) 6787 * 6788 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6789 * vCC (r1). Useful for integer division and modulus. 6790 * 6791 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6792 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6793 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6794 * rem-double/2addr 6795 */ 6796 /* binop/2addr vA, vB */ 6797 mov r9, rINST, lsr #8 @ r9<- A+ 6798 mov r1, rINST, lsr #12 @ r1<- B 6799 and r9, r9, #15 6800 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6801 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6802 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6803 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6804 .if 0 6805 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6806 beq common_errDivideByZero 6807 .endif 6808 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6809 6810 @ optional op; may set condition codes 6811 bl fmod @ result<- op, r0-r3 changed 6812 GET_INST_OPCODE(ip) @ extract opcode from rINST 6813 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6814 GOTO_OPCODE(ip) @ jump to next instruction 6815 /* 12-15 instructions */ 6816 6817 6818 6819/* ------------------------------ */ 6820 .balign 64 6821.L_OP_ADD_INT_LIT16: /* 0xd0 */ 6822/* File: armv5te/OP_ADD_INT_LIT16.S */ 6823/* File: armv5te/binopLit16.S */ 6824 /* 6825 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6826 * that specifies an instruction that performs "result = r0 op r1". 6827 * This could be an ARM instruction or a function call. (If the result 6828 * comes back in a register other than r0, you can override "result".) 6829 * 6830 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6831 * vCC (r1). Useful for integer division and modulus. 6832 * 6833 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6834 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6835 */ 6836 /* binop/lit16 vA, vB, #+CCCC */ 6837 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6838 mov r2, rINST, lsr #12 @ r2<- B 6839 mov r9, rINST, lsr #8 @ r9<- A+ 6840 GET_VREG(r0, r2) @ r0<- vB 6841 and r9, r9, #15 6842 .if 0 6843 cmp r1, #0 @ is second operand zero? 6844 beq common_errDivideByZero 6845 .endif 6846 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6847 6848 add r0, r0, r1 @ r0<- op, r0-r3 changed 6849 GET_INST_OPCODE(ip) @ extract opcode from rINST 6850 SET_VREG(r0, r9) @ vAA<- r0 6851 GOTO_OPCODE(ip) @ jump to next instruction 6852 /* 10-13 instructions */ 6853 6854 6855 6856/* ------------------------------ */ 6857 .balign 64 6858.L_OP_RSUB_INT: /* 0xd1 */ 6859/* File: armv5te/OP_RSUB_INT.S */ 6860/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */ 6861/* File: armv5te/binopLit16.S */ 6862 /* 6863 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6864 * that specifies an instruction that performs "result = r0 op r1". 6865 * This could be an ARM instruction or a function call. (If the result 6866 * comes back in a register other than r0, you can override "result".) 6867 * 6868 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6869 * vCC (r1). Useful for integer division and modulus. 6870 * 6871 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6872 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6873 */ 6874 /* binop/lit16 vA, vB, #+CCCC */ 6875 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6876 mov r2, rINST, lsr #12 @ r2<- B 6877 mov r9, rINST, lsr #8 @ r9<- A+ 6878 GET_VREG(r0, r2) @ r0<- vB 6879 and r9, r9, #15 6880 .if 0 6881 cmp r1, #0 @ is second operand zero? 6882 beq common_errDivideByZero 6883 .endif 6884 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6885 6886 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6887 GET_INST_OPCODE(ip) @ extract opcode from rINST 6888 SET_VREG(r0, r9) @ vAA<- r0 6889 GOTO_OPCODE(ip) @ jump to next instruction 6890 /* 10-13 instructions */ 6891 6892 6893 6894/* ------------------------------ */ 6895 .balign 64 6896.L_OP_MUL_INT_LIT16: /* 0xd2 */ 6897/* File: armv5te/OP_MUL_INT_LIT16.S */ 6898/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6899/* File: armv5te/binopLit16.S */ 6900 /* 6901 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6902 * that specifies an instruction that performs "result = r0 op r1". 
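 * (At that point r0 holds vB and r1 the sign-extended 16-bit literal CCCC.)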
6903 * This could be an ARM instruction or a function call. (If the result 6904 * comes back in a register other than r0, you can override "result".) 6905 * 6906 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6907 * vCC (r1). Useful for integer division and modulus. 6908 * 6909 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6910 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6911 */ 6912 /* binop/lit16 vA, vB, #+CCCC */ 6913 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6914 mov r2, rINST, lsr #12 @ r2<- B 6915 mov r9, rINST, lsr #8 @ r9<- A+ 6916 GET_VREG(r0, r2) @ r0<- vB 6917 and r9, r9, #15 6918 .if 0 6919 cmp r1, #0 @ is second operand zero? 6920 beq common_errDivideByZero 6921 .endif 6922 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6923 6924 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6925 GET_INST_OPCODE(ip) @ extract opcode from rINST 6926 SET_VREG(r0, r9) @ vAA<- r0 6927 GOTO_OPCODE(ip) @ jump to next instruction 6928 /* 10-13 instructions */ 6929 6930 6931 6932/* ------------------------------ */ 6933 .balign 64 6934.L_OP_DIV_INT_LIT16: /* 0xd3 */ 6935/* File: armv5te/OP_DIV_INT_LIT16.S */ 6936/* File: armv5te/binopLit16.S */ 6937 /* 6938 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6939 * that specifies an instruction that performs "result = r0 op r1". 6940 * This could be an ARM instruction or a function call. (If the result 6941 * comes back in a register other than r0, you can override "result".) 6942 * 6943 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6944 * vCC (r1). Useful for integer division and modulus. 6945 * 6946 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6947 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6948 */ 6949 /* binop/lit16 vA, vB, #+CCCC */ 6950 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6951 mov r2, rINST, lsr #12 @ r2<- B 6952 mov r9, rINST, lsr #8 @ r9<- A+ 6953 GET_VREG(r0, r2) @ r0<- vB 6954 and r9, r9, #15 6955 .if 1 6956 cmp r1, #0 @ is second operand zero? 6957 beq common_errDivideByZero 6958 .endif 6959 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6960 6961 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6962 GET_INST_OPCODE(ip) @ extract opcode from rINST 6963 SET_VREG(r0, r9) @ vAA<- r0 6964 GOTO_OPCODE(ip) @ jump to next instruction 6965 /* 10-13 instructions */ 6966 6967 6968 6969/* ------------------------------ */ 6970 .balign 64 6971.L_OP_REM_INT_LIT16: /* 0xd4 */ 6972/* File: armv5te/OP_REM_INT_LIT16.S */ 6973/* idivmod returns quotient in r0 and remainder in r1 */ 6974/* File: armv5te/binopLit16.S */ 6975 /* 6976 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6977 * that specifies an instruction that performs "result = r0 op r1". 6978 * This could be an ARM instruction or a function call. (If the result 6979 * comes back in a register other than r0, you can override "result".) 6980 * 6981 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6982 * vCC (r1). Useful for integer division and modulus. 6983 * 6984 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6985 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6986 */ 6987 /* binop/lit16 vA, vB, #+CCCC */ 6988 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6989 mov r2, rINST, lsr #12 @ r2<- B 6990 mov r9, rINST, lsr #8 @ r9<- A+ 6991 GET_VREG(r0, r2) @ r0<- vB 6992 and r9, r9, #15 6993 .if 1 6994 cmp r1, #0 @ is second operand zero? 
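    @ (the check is explicit because Dalvik must throw ArithmeticException on
    @ integer division by zero; the EABI __aeabi_idivmod helper called below
    @ does not raise one itself)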
6995 beq common_errDivideByZero 6996 .endif 6997 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6998 6999 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 7000 GET_INST_OPCODE(ip) @ extract opcode from rINST 7001 SET_VREG(r1, r9) @ vAA<- r1 7002 GOTO_OPCODE(ip) @ jump to next instruction 7003 /* 10-13 instructions */ 7004 7005 7006 7007/* ------------------------------ */ 7008 .balign 64 7009.L_OP_AND_INT_LIT16: /* 0xd5 */ 7010/* File: armv5te/OP_AND_INT_LIT16.S */ 7011/* File: armv5te/binopLit16.S */ 7012 /* 7013 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 7014 * that specifies an instruction that performs "result = r0 op r1". 7015 * This could be an ARM instruction or a function call. (If the result 7016 * comes back in a register other than r0, you can override "result".) 7017 * 7018 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7019 * vCC (r1). Useful for integer division and modulus. 7020 * 7021 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 7022 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 7023 */ 7024 /* binop/lit16 vA, vB, #+CCCC */ 7025 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 7026 mov r2, rINST, lsr #12 @ r2<- B 7027 mov r9, rINST, lsr #8 @ r9<- A+ 7028 GET_VREG(r0, r2) @ r0<- vB 7029 and r9, r9, #15 7030 .if 0 7031 cmp r1, #0 @ is second operand zero? 7032 beq common_errDivideByZero 7033 .endif 7034 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7035 7036 and r0, r0, r1 @ r0<- op, r0-r3 changed 7037 GET_INST_OPCODE(ip) @ extract opcode from rINST 7038 SET_VREG(r0, r9) @ vAA<- r0 7039 GOTO_OPCODE(ip) @ jump to next instruction 7040 /* 10-13 instructions */ 7041 7042 7043 7044/* ------------------------------ */ 7045 .balign 64 7046.L_OP_OR_INT_LIT16: /* 0xd6 */ 7047/* File: armv5te/OP_OR_INT_LIT16.S */ 7048/* File: armv5te/binopLit16.S */ 7049 /* 7050 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 7051 * that specifies an instruction that performs "result = r0 op r1". 7052 * This could be an ARM instruction or a function call. (If the result 7053 * comes back in a register other than r0, you can override "result".) 7054 * 7055 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7056 * vCC (r1). Useful for integer division and modulus. 7057 * 7058 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 7059 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 7060 */ 7061 /* binop/lit16 vA, vB, #+CCCC */ 7062 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 7063 mov r2, rINST, lsr #12 @ r2<- B 7064 mov r9, rINST, lsr #8 @ r9<- A+ 7065 GET_VREG(r0, r2) @ r0<- vB 7066 and r9, r9, #15 7067 .if 0 7068 cmp r1, #0 @ is second operand zero? 7069 beq common_errDivideByZero 7070 .endif 7071 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7072 7073 orr r0, r0, r1 @ r0<- op, r0-r3 changed 7074 GET_INST_OPCODE(ip) @ extract opcode from rINST 7075 SET_VREG(r0, r9) @ vAA<- r0 7076 GOTO_OPCODE(ip) @ jump to next instruction 7077 /* 10-13 instructions */ 7078 7079 7080 7081/* ------------------------------ */ 7082 .balign 64 7083.L_OP_XOR_INT_LIT16: /* 0xd7 */ 7084/* File: armv5te/OP_XOR_INT_LIT16.S */ 7085/* File: armv5te/binopLit16.S */ 7086 /* 7087 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 7088 * that specifies an instruction that performs "result = r0 op r1". 7089 * This could be an ARM instruction or a function call. (If the result 7090 * comes back in a register other than r0, you can override "result".) 
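 * (The rem-int handlers use that override to store r1, since __aeabi_idivmod
 * leaves the remainder there.)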
7091 * 7092 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7093 * vCC (r1). Useful for integer division and modulus. 7094 * 7095 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 7096 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 7097 */ 7098 /* binop/lit16 vA, vB, #+CCCC */ 7099 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 7100 mov r2, rINST, lsr #12 @ r2<- B 7101 mov r9, rINST, lsr #8 @ r9<- A+ 7102 GET_VREG(r0, r2) @ r0<- vB 7103 and r9, r9, #15 7104 .if 0 7105 cmp r1, #0 @ is second operand zero? 7106 beq common_errDivideByZero 7107 .endif 7108 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7109 7110 eor r0, r0, r1 @ r0<- op, r0-r3 changed 7111 GET_INST_OPCODE(ip) @ extract opcode from rINST 7112 SET_VREG(r0, r9) @ vAA<- r0 7113 GOTO_OPCODE(ip) @ jump to next instruction 7114 /* 10-13 instructions */ 7115 7116 7117 7118/* ------------------------------ */ 7119 .balign 64 7120.L_OP_ADD_INT_LIT8: /* 0xd8 */ 7121/* File: armv5te/OP_ADD_INT_LIT8.S */ 7122/* File: armv5te/binopLit8.S */ 7123 /* 7124 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7125 * that specifies an instruction that performs "result = r0 op r1". 7126 * This could be an ARM instruction or a function call. (If the result 7127 * comes back in a register other than r0, you can override "result".) 7128 * 7129 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7130 * vCC (r1). Useful for integer division and modulus. 7131 * 7132 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7133 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7134 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7135 */ 7136 /* binop/lit8 vAA, vBB, #+CC */ 7137 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7138 mov r9, rINST, lsr #8 @ r9<- AA 7139 and r2, r3, #255 @ r2<- BB 7140 GET_VREG(r0, r2) @ r0<- vBB 7141 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7142 .if 0 7143 @cmp r1, #0 @ is second operand zero? 7144 beq common_errDivideByZero 7145 .endif 7146 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7147 7148 @ optional op; may set condition codes 7149 add r0, r0, r1 @ r0<- op, r0-r3 changed 7150 GET_INST_OPCODE(ip) @ extract opcode from rINST 7151 SET_VREG(r0, r9) @ vAA<- r0 7152 GOTO_OPCODE(ip) @ jump to next instruction 7153 /* 10-12 instructions */ 7154 7155 7156 7157/* ------------------------------ */ 7158 .balign 64 7159.L_OP_RSUB_INT_LIT8: /* 0xd9 */ 7160/* File: armv5te/OP_RSUB_INT_LIT8.S */ 7161/* File: armv5te/binopLit8.S */ 7162 /* 7163 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7164 * that specifies an instruction that performs "result = r0 op r1". 7165 * This could be an ARM instruction or a function call. (If the result 7166 * comes back in a register other than r0, you can override "result".) 7167 * 7168 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7169 * vCC (r1). Useful for integer division and modulus. 7170 * 7171 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7172 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7173 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7174 */ 7175 /* binop/lit8 vAA, vBB, #+CC */ 7176 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7177 mov r9, rINST, lsr #8 @ r9<- AA 7178 and r2, r3, #255 @ r2<- BB 7179 GET_VREG(r0, r2) @ r0<- vBB 7180 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7181 .if 0 7182 @cmp r1, #0 @ is second operand zero? 
7183 beq common_errDivideByZero 7184 .endif 7185 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7186 7187 @ optional op; may set condition codes 7188 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 7189 GET_INST_OPCODE(ip) @ extract opcode from rINST 7190 SET_VREG(r0, r9) @ vAA<- r0 7191 GOTO_OPCODE(ip) @ jump to next instruction 7192 /* 10-12 instructions */ 7193 7194 7195 7196/* ------------------------------ */ 7197 .balign 64 7198.L_OP_MUL_INT_LIT8: /* 0xda */ 7199/* File: armv5te/OP_MUL_INT_LIT8.S */ 7200/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 7201/* File: armv5te/binopLit8.S */ 7202 /* 7203 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7204 * that specifies an instruction that performs "result = r0 op r1". 7205 * This could be an ARM instruction or a function call. (If the result 7206 * comes back in a register other than r0, you can override "result".) 7207 * 7208 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7209 * vCC (r1). Useful for integer division and modulus. 7210 * 7211 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7212 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7213 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7214 */ 7215 /* binop/lit8 vAA, vBB, #+CC */ 7216 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7217 mov r9, rINST, lsr #8 @ r9<- AA 7218 and r2, r3, #255 @ r2<- BB 7219 GET_VREG(r0, r2) @ r0<- vBB 7220 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7221 .if 0 7222 @cmp r1, #0 @ is second operand zero? 7223 beq common_errDivideByZero 7224 .endif 7225 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7226 7227 @ optional op; may set condition codes 7228 mul r0, r1, r0 @ r0<- op, r0-r3 changed 7229 GET_INST_OPCODE(ip) @ extract opcode from rINST 7230 SET_VREG(r0, r9) @ vAA<- r0 7231 GOTO_OPCODE(ip) @ jump to next instruction 7232 /* 10-12 instructions */ 7233 7234 7235 7236/* ------------------------------ */ 7237 .balign 64 7238.L_OP_DIV_INT_LIT8: /* 0xdb */ 7239/* File: armv5te/OP_DIV_INT_LIT8.S */ 7240/* File: armv5te/binopLit8.S */ 7241 /* 7242 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7243 * that specifies an instruction that performs "result = r0 op r1". 7244 * This could be an ARM instruction or a function call. (If the result 7245 * comes back in a register other than r0, you can override "result".) 7246 * 7247 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7248 * vCC (r1). Useful for integer division and modulus. 7249 * 7250 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7251 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7252 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7253 */ 7254 /* binop/lit8 vAA, vBB, #+CC */ 7255 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7256 mov r9, rINST, lsr #8 @ r9<- AA 7257 and r2, r3, #255 @ r2<- BB 7258 GET_VREG(r0, r2) @ r0<- vBB 7259 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7260 .if 1 7261 @cmp r1, #0 @ is second operand zero? 
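    @ (the cmp above is commented out because the "movs r1, r3, asr #8" that
    @ extracted the literal already set the flags, so beq can test them directly)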
7262 beq common_errDivideByZero 7263 .endif 7264 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7265 7266 @ optional op; may set condition codes 7267 bl __aeabi_idiv @ r0<- op, r0-r3 changed 7268 GET_INST_OPCODE(ip) @ extract opcode from rINST 7269 SET_VREG(r0, r9) @ vAA<- r0 7270 GOTO_OPCODE(ip) @ jump to next instruction 7271 /* 10-12 instructions */ 7272 7273 7274 7275/* ------------------------------ */ 7276 .balign 64 7277.L_OP_REM_INT_LIT8: /* 0xdc */ 7278/* File: armv5te/OP_REM_INT_LIT8.S */ 7279/* idivmod returns quotient in r0 and remainder in r1 */ 7280/* File: armv5te/binopLit8.S */ 7281 /* 7282 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7283 * that specifies an instruction that performs "result = r0 op r1". 7284 * This could be an ARM instruction or a function call. (If the result 7285 * comes back in a register other than r0, you can override "result".) 7286 * 7287 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7288 * vCC (r1). Useful for integer division and modulus. 7289 * 7290 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7291 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7292 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7293 */ 7294 /* binop/lit8 vAA, vBB, #+CC */ 7295 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7296 mov r9, rINST, lsr #8 @ r9<- AA 7297 and r2, r3, #255 @ r2<- BB 7298 GET_VREG(r0, r2) @ r0<- vBB 7299 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7300 .if 1 7301 @cmp r1, #0 @ is second operand zero? 7302 beq common_errDivideByZero 7303 .endif 7304 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7305 7306 @ optional op; may set condition codes 7307 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 7308 GET_INST_OPCODE(ip) @ extract opcode from rINST 7309 SET_VREG(r1, r9) @ vAA<- r1 7310 GOTO_OPCODE(ip) @ jump to next instruction 7311 /* 10-12 instructions */ 7312 7313 7314 7315/* ------------------------------ */ 7316 .balign 64 7317.L_OP_AND_INT_LIT8: /* 0xdd */ 7318/* File: armv5te/OP_AND_INT_LIT8.S */ 7319/* File: armv5te/binopLit8.S */ 7320 /* 7321 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7322 * that specifies an instruction that performs "result = r0 op r1". 7323 * This could be an ARM instruction or a function call. (If the result 7324 * comes back in a register other than r0, you can override "result".) 7325 * 7326 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7327 * vCC (r1). Useful for integer division and modulus. 7328 * 7329 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7330 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7331 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7332 */ 7333 /* binop/lit8 vAA, vBB, #+CC */ 7334 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7335 mov r9, rINST, lsr #8 @ r9<- AA 7336 and r2, r3, #255 @ r2<- BB 7337 GET_VREG(r0, r2) @ r0<- vBB 7338 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7339 .if 0 7340 @cmp r1, #0 @ is second operand zero? 
7341 beq common_errDivideByZero 7342 .endif 7343 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7344 7345 @ optional op; may set condition codes 7346 and r0, r0, r1 @ r0<- op, r0-r3 changed 7347 GET_INST_OPCODE(ip) @ extract opcode from rINST 7348 SET_VREG(r0, r9) @ vAA<- r0 7349 GOTO_OPCODE(ip) @ jump to next instruction 7350 /* 10-12 instructions */ 7351 7352 7353 7354/* ------------------------------ */ 7355 .balign 64 7356.L_OP_OR_INT_LIT8: /* 0xde */ 7357/* File: armv5te/OP_OR_INT_LIT8.S */ 7358/* File: armv5te/binopLit8.S */ 7359 /* 7360 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7361 * that specifies an instruction that performs "result = r0 op r1". 7362 * This could be an ARM instruction or a function call. (If the result 7363 * comes back in a register other than r0, you can override "result".) 7364 * 7365 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7366 * vCC (r1). Useful for integer division and modulus. 7367 * 7368 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7369 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7370 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7371 */ 7372 /* binop/lit8 vAA, vBB, #+CC */ 7373 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7374 mov r9, rINST, lsr #8 @ r9<- AA 7375 and r2, r3, #255 @ r2<- BB 7376 GET_VREG(r0, r2) @ r0<- vBB 7377 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7378 .if 0 7379 @cmp r1, #0 @ is second operand zero? 7380 beq common_errDivideByZero 7381 .endif 7382 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7383 7384 @ optional op; may set condition codes 7385 orr r0, r0, r1 @ r0<- op, r0-r3 changed 7386 GET_INST_OPCODE(ip) @ extract opcode from rINST 7387 SET_VREG(r0, r9) @ vAA<- r0 7388 GOTO_OPCODE(ip) @ jump to next instruction 7389 /* 10-12 instructions */ 7390 7391 7392 7393/* ------------------------------ */ 7394 .balign 64 7395.L_OP_XOR_INT_LIT8: /* 0xdf */ 7396/* File: armv5te/OP_XOR_INT_LIT8.S */ 7397/* File: armv5te/binopLit8.S */ 7398 /* 7399 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7400 * that specifies an instruction that performs "result = r0 op r1". 7401 * This could be an ARM instruction or a function call. (If the result 7402 * comes back in a register other than r0, you can override "result".) 7403 * 7404 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7405 * vCC (r1). Useful for integer division and modulus. 7406 * 7407 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7408 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7409 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7410 */ 7411 /* binop/lit8 vAA, vBB, #+CC */ 7412 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7413 mov r9, rINST, lsr #8 @ r9<- AA 7414 and r2, r3, #255 @ r2<- BB 7415 GET_VREG(r0, r2) @ r0<- vBB 7416 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7417 .if 0 7418 @cmp r1, #0 @ is second operand zero? 7419 beq common_errDivideByZero 7420 .endif 7421 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7422 7423 @ optional op; may set condition codes 7424 eor r0, r0, r1 @ r0<- op, r0-r3 changed 7425 GET_INST_OPCODE(ip) @ extract opcode from rINST 7426 SET_VREG(r0, r9) @ vAA<- r0 7427 GOTO_OPCODE(ip) @ jump to next instruction 7428 /* 10-12 instructions */ 7429 7430 7431 7432/* ------------------------------ */ 7433 .balign 64 7434.L_OP_SHL_INT_LIT8: /* 0xe0 */ 7435/* File: armv5te/OP_SHL_INT_LIT8.S */ 7436/* File: armv5te/binopLit8.S */ 7437 /* 7438 * Generic 32-bit "lit8" binary operation. 
Provide an "instr" line 7439 * that specifies an instruction that performs "result = r0 op r1". 7440 * This could be an ARM instruction or a function call. (If the result 7441 * comes back in a register other than r0, you can override "result".) 7442 * 7443 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7444 * vCC (r1). Useful for integer division and modulus. 7445 * 7446 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7447 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7448 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7449 */ 7450 /* binop/lit8 vAA, vBB, #+CC */ 7451 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7452 mov r9, rINST, lsr #8 @ r9<- AA 7453 and r2, r3, #255 @ r2<- BB 7454 GET_VREG(r0, r2) @ r0<- vBB 7455 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7456 .if 0 7457 @cmp r1, #0 @ is second operand zero? 7458 beq common_errDivideByZero 7459 .endif 7460 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7461 7462 and r1, r1, #31 @ optional op; may set condition codes 7463 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 7464 GET_INST_OPCODE(ip) @ extract opcode from rINST 7465 SET_VREG(r0, r9) @ vAA<- r0 7466 GOTO_OPCODE(ip) @ jump to next instruction 7467 /* 10-12 instructions */ 7468 7469 7470 7471/* ------------------------------ */ 7472 .balign 64 7473.L_OP_SHR_INT_LIT8: /* 0xe1 */ 7474/* File: armv5te/OP_SHR_INT_LIT8.S */ 7475/* File: armv5te/binopLit8.S */ 7476 /* 7477 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7478 * that specifies an instruction that performs "result = r0 op r1". 7479 * This could be an ARM instruction or a function call. (If the result 7480 * comes back in a register other than r0, you can override "result".) 7481 * 7482 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7483 * vCC (r1). Useful for integer division and modulus. 7484 * 7485 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7486 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7487 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7488 */ 7489 /* binop/lit8 vAA, vBB, #+CC */ 7490 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7491 mov r9, rINST, lsr #8 @ r9<- AA 7492 and r2, r3, #255 @ r2<- BB 7493 GET_VREG(r0, r2) @ r0<- vBB 7494 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7495 .if 0 7496 @cmp r1, #0 @ is second operand zero? 7497 beq common_errDivideByZero 7498 .endif 7499 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7500 7501 and r1, r1, #31 @ optional op; may set condition codes 7502 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 7503 GET_INST_OPCODE(ip) @ extract opcode from rINST 7504 SET_VREG(r0, r9) @ vAA<- r0 7505 GOTO_OPCODE(ip) @ jump to next instruction 7506 /* 10-12 instructions */ 7507 7508 7509 7510/* ------------------------------ */ 7511 .balign 64 7512.L_OP_USHR_INT_LIT8: /* 0xe2 */ 7513/* File: armv5te/OP_USHR_INT_LIT8.S */ 7514/* File: armv5te/binopLit8.S */ 7515 /* 7516 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7517 * that specifies an instruction that performs "result = r0 op r1". 7518 * This could be an ARM instruction or a function call. (If the result 7519 * comes back in a register other than r0, you can override "result".) 7520 * 7521 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7522 * vCC (r1). Useful for integer division and modulus. 
7523 * 7524 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7525 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7526 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7527 */ 7528 /* binop/lit8 vAA, vBB, #+CC */ 7529 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7530 mov r9, rINST, lsr #8 @ r9<- AA 7531 and r2, r3, #255 @ r2<- BB 7532 GET_VREG(r0, r2) @ r0<- vBB 7533 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7534 .if 0 7535 @cmp r1, #0 @ is second operand zero? 7536 beq common_errDivideByZero 7537 .endif 7538 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7539 7540 and r1, r1, #31 @ optional op; may set condition codes 7541 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 7542 GET_INST_OPCODE(ip) @ extract opcode from rINST 7543 SET_VREG(r0, r9) @ vAA<- r0 7544 GOTO_OPCODE(ip) @ jump to next instruction 7545 /* 10-12 instructions */ 7546 7547 7548 7549/* ------------------------------ */ 7550 .balign 64 7551.L_OP_UNUSED_E3: /* 0xe3 */ 7552/* File: armv5te/OP_UNUSED_E3.S */ 7553/* File: armv5te/unused.S */ 7554 bl common_abort 7555 7556 7557 7558/* ------------------------------ */ 7559 .balign 64 7560.L_OP_UNUSED_E4: /* 0xe4 */ 7561/* File: armv5te/OP_UNUSED_E4.S */ 7562/* File: armv5te/unused.S */ 7563 bl common_abort 7564 7565 7566 7567/* ------------------------------ */ 7568 .balign 64 7569.L_OP_UNUSED_E5: /* 0xe5 */ 7570/* File: armv5te/OP_UNUSED_E5.S */ 7571/* File: armv5te/unused.S */ 7572 bl common_abort 7573 7574 7575 7576/* ------------------------------ */ 7577 .balign 64 7578.L_OP_UNUSED_E6: /* 0xe6 */ 7579/* File: armv5te/OP_UNUSED_E6.S */ 7580/* File: armv5te/unused.S */ 7581 bl common_abort 7582 7583 7584 7585/* ------------------------------ */ 7586 .balign 64 7587.L_OP_UNUSED_E7: /* 0xe7 */ 7588/* File: armv5te/OP_UNUSED_E7.S */ 7589/* File: armv5te/unused.S */ 7590 bl common_abort 7591 7592 7593 7594/* ------------------------------ */ 7595 .balign 64 7596.L_OP_UNUSED_E8: /* 0xe8 */ 7597/* File: armv5te/OP_UNUSED_E8.S */ 7598/* File: armv5te/unused.S */ 7599 bl common_abort 7600 7601 7602 7603/* ------------------------------ */ 7604 .balign 64 7605.L_OP_UNUSED_E9: /* 0xe9 */ 7606/* File: armv5te/OP_UNUSED_E9.S */ 7607/* File: armv5te/unused.S */ 7608 bl common_abort 7609 7610 7611 7612/* ------------------------------ */ 7613 .balign 64 7614.L_OP_UNUSED_EA: /* 0xea */ 7615/* File: armv5te/OP_UNUSED_EA.S */ 7616/* File: armv5te/unused.S */ 7617 bl common_abort 7618 7619 7620 7621/* ------------------------------ */ 7622 .balign 64 7623.L_OP_UNUSED_EB: /* 0xeb */ 7624/* File: armv5te/OP_UNUSED_EB.S */ 7625/* File: armv5te/unused.S */ 7626 bl common_abort 7627 7628 7629 7630/* ------------------------------ */ 7631 .balign 64 7632.L_OP_BREAKPOINT: /* 0xec */ 7633/* File: armv5te/OP_BREAKPOINT.S */ 7634/* File: armv5te/unused.S */ 7635 bl common_abort 7636 7637 7638 7639/* ------------------------------ */ 7640 .balign 64 7641.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */ 7642/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */ 7643 /* 7644 * Handle a throw-verification-error instruction. This throws an 7645 * exception for an error discovered during verification. The 7646 * exception is indicated by AA, with some detail provided by BBBB. 
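 * The handler exports the PC, passes the current method plus AA/BBBB to
 * dvmThrowVerificationError (which always sets an exception), and then falls
 * into the common exception-handling path.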
7647 */ 7648 /* op AA, ref@BBBB */ 7649 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7650 FETCH(r2, 1) @ r2<- BBBB 7651 EXPORT_PC() @ export the PC 7652 mov r1, rINST, lsr #8 @ r1<- AA 7653 bl dvmThrowVerificationError @ always throws 7654 b common_exceptionThrown @ handle exception 7655 7656 7657/* ------------------------------ */ 7658 .balign 64 7659.L_OP_EXECUTE_INLINE: /* 0xee */ 7660/* File: armv5te/OP_EXECUTE_INLINE.S */ 7661 /* 7662 * Execute a "native inline" instruction. 7663 * 7664 * We need to call an InlineOp4Func: 7665 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7666 * 7667 * The first four args are in r0-r3, pointer to return value storage 7668 * is on the stack. The function's return value is a flag that tells 7669 * us if an exception was thrown. 7670 */ 7671 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */ 7672 FETCH(r10, 1) @ r10<- BBBB 7673 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7674 EXPORT_PC() @ can throw 7675 sub sp, sp, #8 @ make room for arg, +64 bit align 7676 mov r0, rINST, lsr #12 @ r0<- B 7677 str r1, [sp] @ push &glue->retval 7678 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after 7679 add sp, sp, #8 @ pop stack 7680 cmp r0, #0 @ test boolean result of inline 7681 beq common_exceptionThrown @ returned false, handle exception 7682 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7683 GET_INST_OPCODE(ip) @ extract opcode from rINST 7684 GOTO_OPCODE(ip) @ jump to next instruction 7685 7686/* ------------------------------ */ 7687 .balign 64 7688.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */ 7689/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */ 7690 /* 7691 * Execute a "native inline" instruction, using "/range" semantics. 7692 * Same idea as execute-inline, but we get the args differently. 7693 * 7694 * We need to call an InlineOp4Func: 7695 * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult) 7696 * 7697 * The first four args are in r0-r3, pointer to return value storage 7698 * is on the stack. The function's return value is a flag that tells 7699 * us if an exception was thrown. 7700 */ 7701 /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */ 7702 FETCH(r10, 1) @ r10<- BBBB 7703 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7704 EXPORT_PC() @ can throw 7705 sub sp, sp, #8 @ make room for arg, +64 bit align 7706 mov r0, rINST, lsr #8 @ r0<- AA 7707 str r1, [sp] @ push &glue->retval 7708 bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after 7709 add sp, sp, #8 @ pop stack 7710 cmp r0, #0 @ test boolean result of inline 7711 beq common_exceptionThrown @ returned false, handle exception 7712 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7713 GET_INST_OPCODE(ip) @ extract opcode from rINST 7714 GOTO_OPCODE(ip) @ jump to next instruction 7715 7716/* ------------------------------ */ 7717 .balign 64 7718.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7719/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7720 /* 7721 * invoke-direct-empty is a no-op in a "standard" interpreter. 
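 * (It is the dexopt-generated form of a call to an empty method such as
 * Object.<init>(), so all that is needed here is to skip the three code
 * units of the instruction.)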
7722 */ 7723 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7724 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7725 GOTO_OPCODE(ip) @ execute it 7726 7727/* ------------------------------ */ 7728 .balign 64 7729.L_OP_UNUSED_F1: /* 0xf1 */ 7730/* File: armv5te/OP_UNUSED_F1.S */ 7731/* File: armv5te/unused.S */ 7732 bl common_abort 7733 7734 7735 7736/* ------------------------------ */ 7737 .balign 64 7738.L_OP_IGET_QUICK: /* 0xf2 */ 7739/* File: armv5te/OP_IGET_QUICK.S */ 7740 /* For: iget-quick, iget-object-quick */ 7741 /* op vA, vB, offset@CCCC */ 7742 mov r2, rINST, lsr #12 @ r2<- B 7743 GET_VREG(r3, r2) @ r3<- object we're operating on 7744 FETCH(r1, 1) @ r1<- field byte offset 7745 cmp r3, #0 @ check object for null 7746 mov r2, rINST, lsr #8 @ r2<- A(+) 7747 beq common_errNullObject @ object was null 7748 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7749 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7750 and r2, r2, #15 7751 GET_INST_OPCODE(ip) @ extract opcode from rINST 7752 SET_VREG(r0, r2) @ fp[A]<- r0 7753 GOTO_OPCODE(ip) @ jump to next instruction 7754 7755 7756/* ------------------------------ */ 7757 .balign 64 7758.L_OP_IGET_WIDE_QUICK: /* 0xf3 */ 7759/* File: armv4t/OP_IGET_WIDE_QUICK.S */ 7760 /* iget-wide-quick vA, vB, offset@CCCC */ 7761 mov r2, rINST, lsr #12 @ r2<- B 7762 GET_VREG(r3, r2) @ r3<- object we're operating on 7763 FETCH(r1, 1) @ r1<- field byte offset 7764 cmp r3, #0 @ check object for null 7765 mov r2, rINST, lsr #8 @ r2<- A(+) 7766 beq common_errNullObject @ object was null 7767 add r9, r3, r1 @ r9<- object + offset 7768 ldmia r9, {r0-r1} @ r0/r1<- obj.field (64 bits, aligned) 7769 and r2, r2, #15 @ r2<- A 7770 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7771 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 7772 GET_INST_OPCODE(ip) @ extract opcode from rINST 7773 stmia r3, {r0-r1} @ fp[A]<- r0/r1 7774 GOTO_OPCODE(ip) @ jump to next instruction 7775 7776 7777/* ------------------------------ */ 7778 .balign 64 7779.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */ 7780/* File: armv5te/OP_IGET_OBJECT_QUICK.S */ 7781/* File: armv5te/OP_IGET_QUICK.S */ 7782 /* For: iget-quick, iget-object-quick */ 7783 /* op vA, vB, offset@CCCC */ 7784 mov r2, rINST, lsr #12 @ r2<- B 7785 GET_VREG(r3, r2) @ r3<- object we're operating on 7786 FETCH(r1, 1) @ r1<- field byte offset 7787 cmp r3, #0 @ check object for null 7788 mov r2, rINST, lsr #8 @ r2<- A(+) 7789 beq common_errNullObject @ object was null 7790 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7791 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7792 and r2, r2, #15 7793 GET_INST_OPCODE(ip) @ extract opcode from rINST 7794 SET_VREG(r0, r2) @ fp[A]<- r0 7795 GOTO_OPCODE(ip) @ jump to next instruction 7796 7797 7798 7799/* ------------------------------ */ 7800 .balign 64 7801.L_OP_IPUT_QUICK: /* 0xf5 */ 7802/* File: armv5te/OP_IPUT_QUICK.S */ 7803 /* For: iput-quick, iput-object-quick */ 7804 /* op vA, vB, offset@CCCC */ 7805 mov r2, rINST, lsr #12 @ r2<- B 7806 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7807 FETCH(r1, 1) @ r1<- field byte offset 7808 cmp r3, #0 @ check object for null 7809 mov r2, rINST, lsr #8 @ r2<- A(+) 7810 beq common_errNullObject @ object was null 7811 and r2, r2, #15 7812 GET_VREG(r0, r2) @ r0<- fp[A] 7813 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7814 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7815 GET_INST_OPCODE(ip) @ extract opcode from rINST 7816 GOTO_OPCODE(ip) @ jump to next instruction 7817 7818 7819/* ------------------------------ */ 7820 .balign 64 
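/*
 * Like OP_IGET_WIDE_QUICK above, this handler comes from the armv4t
 * directory: ARMv4T has no ldrd/strd, so the 64-bit field is copied with
 * ldmia/stmia register pairs instead.
 */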
7821.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7822/* File: armv4t/OP_IPUT_WIDE_QUICK.S */ 7823 /* iput-wide-quick vA, vB, offset@CCCC */ 7824 mov r0, rINST, lsr #8 @ r0<- A(+) 7825 mov r1, rINST, lsr #12 @ r1<- B 7826 and r0, r0, #15 7827 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7828 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7829 cmp r2, #0 @ check object for null 7830 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7831 beq common_errNullObject @ object was null 7832 FETCH(r3, 1) @ r3<- field byte offset 7833 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7834 add r2, r2, r3 @ r2<- object + byte offset 7835 stmia r2, {r0-r1} @ obj.field (64 bits, aligned)<- r0/r1 7836 GET_INST_OPCODE(ip) @ extract opcode from rINST 7837 GOTO_OPCODE(ip) @ jump to next instruction 7838 7839 7840/* ------------------------------ */ 7841 .balign 64 7842.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7843/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7844/* File: armv5te/OP_IPUT_QUICK.S */ 7845 /* For: iput-quick, iput-object-quick */ 7846 /* op vA, vB, offset@CCCC */ 7847 mov r2, rINST, lsr #12 @ r2<- B 7848 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7849 FETCH(r1, 1) @ r1<- field byte offset 7850 cmp r3, #0 @ check object for null 7851 mov r2, rINST, lsr #8 @ r2<- A(+) 7852 beq common_errNullObject @ object was null 7853 and r2, r2, #15 7854 GET_VREG(r0, r2) @ r0<- fp[A] 7855 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7856 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7857 GET_INST_OPCODE(ip) @ extract opcode from rINST 7858 GOTO_OPCODE(ip) @ jump to next instruction 7859 7860 7861 7862/* ------------------------------ */ 7863 .balign 64 7864.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */ 7865/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7866 /* 7867 * Handle an optimized virtual method call. 7868 * 7869 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7870 */ 7871 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7872 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7873 FETCH(r3, 2) @ r3<- FEDC or CCCC 7874 FETCH(r1, 1) @ r1<- BBBB 7875 .if (!0) 7876 and r3, r3, #15 @ r3<- C (or stays CCCC) 7877 .endif 7878 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7879 cmp r2, #0 @ is "this" null? 7880 beq common_errNullObject @ null "this", throw exception 7881 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7882 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7883 EXPORT_PC() @ invoke must export 7884 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7885 bl common_invokeMethodNoRange @ continue on 7886 7887/* ------------------------------ */ 7888 .balign 64 7889.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */ 7890/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */ 7891/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7892 /* 7893 * Handle an optimized virtual method call. 7894 * 7895 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7896 */ 7897 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7898 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7899 FETCH(r3, 2) @ r3<- FEDC or CCCC 7900 FETCH(r1, 1) @ r1<- BBBB 7901 .if (!1) 7902 and r3, r3, #15 @ r3<- C (or stays CCCC) 7903 .endif 7904 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7905 cmp r2, #0 @ is "this" null? 
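    @ ("quick" dispatch: once "this" is known to be non-null, the target is
    @ read straight out of thisPtr->clazz->vtable[BBBB] below, with no method
    @ resolution step)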
7906 beq common_errNullObject @ null "this", throw exception 7907 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7908 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7909 EXPORT_PC() @ invoke must export 7910 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7911 bl common_invokeMethodRange @ continue on 7912 7913 7914/* ------------------------------ */ 7915 .balign 64 7916.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */ 7917/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7918 /* 7919 * Handle an optimized "super" method call. 7920 * 7921 * for: [opt] invoke-super-quick, invoke-super-quick/range 7922 */ 7923 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7924 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7925 FETCH(r10, 2) @ r10<- GFED or CCCC 7926 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7927 .if (!0) 7928 and r10, r10, #15 @ r10<- D (or stays CCCC) 7929 .endif 7930 FETCH(r1, 1) @ r1<- BBBB 7931 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7932 EXPORT_PC() @ must export for invoke 7933 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7934 GET_VREG(r3, r10) @ r3<- "this" 7935 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7936 cmp r3, #0 @ null "this" ref? 7937 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7938 beq common_errNullObject @ "this" is null, throw exception 7939 bl common_invokeMethodNoRange @ continue on 7940 7941 7942/* ------------------------------ */ 7943 .balign 64 7944.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */ 7945/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */ 7946/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7947 /* 7948 * Handle an optimized "super" method call. 7949 * 7950 * for: [opt] invoke-super-quick, invoke-super-quick/range 7951 */ 7952 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7953 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7954 FETCH(r10, 2) @ r10<- GFED or CCCC 7955 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7956 .if (!1) 7957 and r10, r10, #15 @ r10<- D (or stays CCCC) 7958 .endif 7959 FETCH(r1, 1) @ r1<- BBBB 7960 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7961 EXPORT_PC() @ must export for invoke 7962 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7963 GET_VREG(r3, r10) @ r3<- "this" 7964 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7965 cmp r3, #0 @ null "this" ref? 
7966 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7967 beq common_errNullObject @ "this" is null, throw exception 7968 bl common_invokeMethodRange @ continue on 7969 7970 7971 7972/* ------------------------------ */ 7973 .balign 64 7974.L_OP_UNUSED_FC: /* 0xfc */ 7975/* File: armv5te/OP_UNUSED_FC.S */ 7976/* File: armv5te/unused.S */ 7977 bl common_abort 7978 7979 7980 7981/* ------------------------------ */ 7982 .balign 64 7983.L_OP_UNUSED_FD: /* 0xfd */ 7984/* File: armv5te/OP_UNUSED_FD.S */ 7985/* File: armv5te/unused.S */ 7986 bl common_abort 7987 7988 7989 7990/* ------------------------------ */ 7991 .balign 64 7992.L_OP_UNUSED_FE: /* 0xfe */ 7993/* File: armv5te/OP_UNUSED_FE.S */ 7994/* File: armv5te/unused.S */ 7995 bl common_abort 7996 7997 7998 7999/* ------------------------------ */ 8000 .balign 64 8001.L_OP_UNUSED_FF: /* 0xff */ 8002/* File: armv5te/OP_UNUSED_FF.S */ 8003/* File: armv5te/unused.S */ 8004 bl common_abort 8005 8006 8007 8008 8009 .balign 64 8010 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 8011 .global dvmAsmInstructionEnd 8012dvmAsmInstructionEnd: 8013 8014/* 8015 * =========================================================================== 8016 * Sister implementations 8017 * =========================================================================== 8018 */ 8019 .global dvmAsmSisterStart 8020 .type dvmAsmSisterStart, %function 8021 .text 8022 .balign 4 8023dvmAsmSisterStart: 8024 8025/* continuation for OP_CONST_STRING */ 8026 8027 /* 8028 * Continuation if the String has not yet been resolved. 8029 * r1: BBBB (String ref) 8030 * r9: target register 8031 */ 8032.LOP_CONST_STRING_resolve: 8033 EXPORT_PC() 8034 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8035 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8036 bl dvmResolveString @ r0<- String reference 8037 cmp r0, #0 @ failed? 8038 beq common_exceptionThrown @ yup, handle the exception 8039 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8040 GET_INST_OPCODE(ip) @ extract opcode from rINST 8041 SET_VREG(r0, r9) @ vAA<- r0 8042 GOTO_OPCODE(ip) @ jump to next instruction 8043 8044 8045/* continuation for OP_CONST_STRING_JUMBO */ 8046 8047 /* 8048 * Continuation if the String has not yet been resolved. 8049 * r1: BBBBBBBB (String ref) 8050 * r9: target register 8051 */ 8052.LOP_CONST_STRING_JUMBO_resolve: 8053 EXPORT_PC() 8054 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8055 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8056 bl dvmResolveString @ r0<- String reference 8057 cmp r0, #0 @ failed? 8058 beq common_exceptionThrown @ yup, handle the exception 8059 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 8060 GET_INST_OPCODE(ip) @ extract opcode from rINST 8061 SET_VREG(r0, r9) @ vAA<- r0 8062 GOTO_OPCODE(ip) @ jump to next instruction 8063 8064 8065/* continuation for OP_CONST_CLASS */ 8066 8067 /* 8068 * Continuation if the Class has not yet been resolved. 8069 * r1: BBBB (Class ref) 8070 * r9: target register 8071 */ 8072.LOP_CONST_CLASS_resolve: 8073 EXPORT_PC() 8074 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8075 mov r2, #1 @ r2<- true 8076 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8077 bl dvmResolveClass @ r0<- Class reference 8078 cmp r0, #0 @ failed? 
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CHECK_CAST */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    cmp     r0, #0                      @ failed?
    bne     .LOP_CHECK_CAST_okay        @ no, success

    @ A cast has failed.  We need to throw a ClassCastException with the
    @ class of the object that failed to be cast.
    EXPORT_PC()                         @ about to throw
    ldr     r3, [r9, #offObject_clazz]  @ r3<- obj->clazz
    ldr     r0, .LstrClassCastExceptionPtr
    ldr     r1, [r3, #offClassObject_descriptor]    @ r1<- obj->clazz->descriptor
    bl      dvmThrowExceptionWithClassMessage
    b       common_exceptionThrown

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r2 holds BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r1, r2                      @ r1<- BBBB
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBBB
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_CHECK_CAST_resolved    @ pick up where we left off

.LstrClassCastExceptionPtr:
    .word   .LstrClassCastException


/* continuation for OP_INSTANCE_OF */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    @ fall through to OP_INSTANCE_OF_store

    /*
     * r0 holds boolean result
     * r9 holds A
     */
.LOP_INSTANCE_OF_store:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Trivial test succeeded, save and bail.
     *  r9 holds A
     */
.LOP_INSTANCE_OF_trivial:
    mov     r0, #1                      @ indicate success
    @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r3 holds BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    mov     r1, r3                      @ r1<- BBBB
    mov     r2, #1                      @ r2<- true
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
8179 beq common_exceptionThrown @ yes, handle exception 8180 mov r1, r0 @ r1<- class resolved from BBB 8181 mov r3, rINST, lsr #12 @ r3<- B 8182 GET_VREG(r0, r3) @ r0<- vB (object) 8183 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 8184 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 8185 8186 8187/* continuation for OP_NEW_INSTANCE */ 8188 8189 .balign 32 @ minimize cache lines 8190.LOP_NEW_INSTANCE_finish: @ r0=new object 8191 mov r3, rINST, lsr #8 @ r3<- AA 8192 cmp r0, #0 @ failed? 8193 beq common_exceptionThrown @ yes, handle the exception 8194 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8195 GET_INST_OPCODE(ip) @ extract opcode from rINST 8196 SET_VREG(r0, r3) @ vAA<- r0 8197 GOTO_OPCODE(ip) @ jump to next instruction 8198 8199 /* 8200 * Class initialization required. 8201 * 8202 * r0 holds class object 8203 */ 8204.LOP_NEW_INSTANCE_needinit: 8205 mov r9, r0 @ save r0 8206 bl dvmInitClass @ initialize class 8207 cmp r0, #0 @ check boolean result 8208 mov r0, r9 @ restore r0 8209 bne .LOP_NEW_INSTANCE_initialized @ success, continue 8210 b common_exceptionThrown @ failed, deal with init exception 8211 8212 /* 8213 * Resolution required. This is the least-likely path. 8214 * 8215 * r1 holds BBBB 8216 */ 8217.LOP_NEW_INSTANCE_resolve: 8218 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8219 mov r2, #0 @ r2<- false 8220 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8221 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8222 cmp r0, #0 @ got null? 8223 bne .LOP_NEW_INSTANCE_resolved @ no, continue 8224 b common_exceptionThrown @ yes, handle exception 8225 8226.LstrInstantiationErrorPtr: 8227 .word .LstrInstantiationError 8228 8229 8230/* continuation for OP_NEW_ARRAY */ 8231 8232 8233 /* 8234 * Resolve class. (This is an uncommon case.) 8235 * 8236 * r1 holds array length 8237 * r2 holds class ref CCCC 8238 */ 8239.LOP_NEW_ARRAY_resolve: 8240 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8241 mov r9, r1 @ r9<- length (save) 8242 mov r1, r2 @ r1<- CCCC 8243 mov r2, #0 @ r2<- false 8244 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8245 bl dvmResolveClass @ r0<- call(clazz, ref) 8246 cmp r0, #0 @ got null? 8247 mov r1, r9 @ r1<- length (restore) 8248 beq common_exceptionThrown @ yes, handle exception 8249 @ fall through to OP_NEW_ARRAY_finish 8250 8251 /* 8252 * Finish allocation. 8253 * 8254 * r0 holds class 8255 * r1 holds array length 8256 */ 8257.LOP_NEW_ARRAY_finish: 8258 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 8259 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 8260 cmp r0, #0 @ failed? 8261 mov r2, rINST, lsr #8 @ r2<- A+ 8262 beq common_exceptionThrown @ yes, handle the exception 8263 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8264 and r2, r2, #15 @ r2<- A 8265 GET_INST_OPCODE(ip) @ extract opcode from rINST 8266 SET_VREG(r0, r2) @ vA<- r0 8267 GOTO_OPCODE(ip) @ jump to next instruction 8268 8269 8270/* continuation for OP_FILLED_NEW_ARRAY */ 8271 8272 /* 8273 * On entry: 8274 * r0 holds array class 8275 * r10 holds AA or BA 8276 */ 8277.LOP_FILLED_NEW_ARRAY_continue: 8278 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8279 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8280 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8281 .if 0 8282 mov r1, r10 @ r1<- AA (length) 8283 .else 8284 mov r1, r10, lsr #4 @ r1<- B (length) 8285 .endif 8286 cmp r3, #'I' @ array of ints? 8287 cmpne r3, #'L' @ array of objects? 8288 cmpne r3, #'[' @ array of arrays? 
8289 mov r9, r1 @ save length in r9 8290 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 8291 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8292 cmp r0, #0 @ null return? 8293 beq common_exceptionThrown @ alloc failed, handle exception 8294 8295 FETCH(r1, 2) @ r1<- FEDC or CCCC 8296 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8297 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8298 subs r9, r9, #1 @ length--, check for neg 8299 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8300 bmi 2f @ was zero, bail 8301 8302 @ copy values from registers into the array 8303 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8304 .if 0 8305 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 83061: ldr r3, [r2], #4 @ r3<- *r2++ 8307 subs r9, r9, #1 @ count-- 8308 str r3, [r0], #4 @ *contents++ = vX 8309 bpl 1b 8310 @ continue at 2 8311 .else 8312 cmp r9, #4 @ length was initially 5? 8313 and r2, r10, #15 @ r2<- A 8314 bne 1f @ <= 4 args, branch 8315 GET_VREG(r3, r2) @ r3<- vA 8316 sub r9, r9, #1 @ count-- 8317 str r3, [r0, #16] @ contents[4] = vA 83181: and r2, r1, #15 @ r2<- F/E/D/C 8319 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8320 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8321 subs r9, r9, #1 @ count-- 8322 str r3, [r0], #4 @ *contents++ = vX 8323 bpl 1b 8324 @ continue at 2 8325 .endif 8326 83272: 8328 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8329 GOTO_OPCODE(ip) @ execute it 8330 8331 /* 8332 * Throw an exception indicating that we have not implemented this 8333 * mode of filled-new-array. 8334 */ 8335.LOP_FILLED_NEW_ARRAY_notimpl: 8336 ldr r0, .L_strInternalError 8337 ldr r1, .L_strFilledNewArrayNotImpl 8338 bl dvmThrowException 8339 b common_exceptionThrown 8340 8341 .if (!0) @ define in one or the other, not both 8342.L_strFilledNewArrayNotImpl: 8343 .word .LstrFilledNewArrayNotImpl 8344.L_strInternalError: 8345 .word .LstrInternalError 8346 .endif 8347 8348 8349/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8350 8351 /* 8352 * On entry: 8353 * r0 holds array class 8354 * r10 holds AA or BA 8355 */ 8356.LOP_FILLED_NEW_ARRAY_RANGE_continue: 8357 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8358 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8359 ldrb r3, [r3, #1] @ r3<- descriptor[1] 8360 .if 1 8361 mov r1, r10 @ r1<- AA (length) 8362 .else 8363 mov r1, r10, lsr #4 @ r1<- B (length) 8364 .endif 8365 cmp r3, #'I' @ array of ints? 8366 cmpne r3, #'L' @ array of objects? 8367 cmpne r3, #'[' @ array of arrays? 8368 mov r9, r1 @ save length in r9 8369 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8370 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8371 cmp r0, #0 @ null return? 8372 beq common_exceptionThrown @ alloc failed, handle exception 8373 8374 FETCH(r1, 2) @ r1<- FEDC or CCCC 8375 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8376 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8377 subs r9, r9, #1 @ length--, check for neg 8378 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8379 bmi 2f @ was zero, bail 8380 8381 @ copy values from registers into the array 8382 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8383 .if 1 8384 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 83851: ldr r3, [r2], #4 @ r3<- *r2++ 8386 subs r9, r9, #1 @ count-- 8387 str r3, [r0], #4 @ *contents++ = vX 8388 bpl 1b 8389 @ continue at 2 8390 .else 8391 cmp r9, #4 @ length was initially 5? 
    and     r2, r10, #15                @ r2<- A
    bne     1f                          @ <= 4 args, branch
    GET_VREG(r3, r2)                    @ r3<- vA
    sub     r9, r9, #1                  @ count--
    str     r3, [r0, #16]               @ contents[4] = vA
1:  and     r2, r1, #15                 @ r2<- F/E/D/C
    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .endif

2:
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

    /*
     * Throw an exception indicating that we have not implemented this
     * mode of filled-new-array.
     */
.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
    ldr     r0, .L_strInternalError
    ldr     r1, .L_strFilledNewArrayNotImpl
    bl      dvmThrowException
    b       common_exceptionThrown

    .if     (!1)                        @ define in one or the other, not both
.L_strFilledNewArrayNotImpl:
    .word   .LstrFilledNewArrayNotImpl
.L_strInternalError:
    .word   .LstrInternalError
    .endif


/* continuation for OP_CMPL_FLOAT */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPL_FLOAT_gt_or_nan:
    mov     r1, r9                      @ reverse order
    mov     r0, r10
    bl      __aeabi_cfcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPL_FLOAT_finish
    mvn     r1, #0                      @ r1<- 1 or -1 for NaN
    b       .LOP_CMPL_FLOAT_finish


#if 0       /* "classic" form */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpeq              @ r0<- (vBB == vCC)
    cmp     r0, #0                      @ equal?
    movne   r1, #0                      @ yes, result is 0
    bne     OP_CMPL_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmplt              @ r0<- (vBB < vCC)
    cmp     r0, #0                      @ less than?
    b       OP_CMPL_FLOAT_continue
@%break

OP_CMPL_FLOAT_continue:
    mvnne   r1, #0                      @ yes, result is -1
    bne     OP_CMPL_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpgt              @ r0<- (vBB > vCC)
    cmp     r0, #0                      @ greater than?
    beq     OP_CMPL_FLOAT_nan           @ no, must be NaN
    mov     r1, #1                      @ yes, result is 1
    @ fall through to _finish

OP_CMPL_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * This is expected to be uncommon, so we double-branch (once to here,
     * again back to _finish).
     */
OP_CMPL_FLOAT_nan:
    mvn     r1, #0                      @ r1<- 1 or -1 for NaN
    b       OP_CMPL_FLOAT_finish

#endif


/* continuation for OP_CMPG_FLOAT */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPG_FLOAT_gt_or_nan:
    mov     r1, r9                      @ reverse order
    mov     r0, r10
    bl      __aeabi_cfcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPG_FLOAT_finish
    mov     r1, #1                      @ r1<- 1 or -1 for NaN
    b       .LOP_CMPG_FLOAT_finish


#if 0       /* "classic" form */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpeq              @ r0<- (vBB == vCC)
    cmp     r0, #0                      @ equal?
    movne   r1, #0                      @ yes, result is 0
    bne     OP_CMPG_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmplt              @ r0<- (vBB < vCC)
    cmp     r0, #0                      @ less than?
    b       OP_CMPG_FLOAT_continue
@%break

OP_CMPG_FLOAT_continue:
    mvnne   r1, #0                      @ yes, result is -1
    bne     OP_CMPG_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpgt              @ r0<- (vBB > vCC)
    cmp     r0, #0                      @ greater than?
    beq     OP_CMPG_FLOAT_nan           @ no, must be NaN
    mov     r1, #1                      @ yes, result is 1
    @ fall through to _finish

OP_CMPG_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * This is expected to be uncommon, so we double-branch (once to here,
     * again back to _finish).
     */
OP_CMPG_FLOAT_nan:
    mov     r1, #1                      @ r1<- 1 or -1 for NaN
    b       OP_CMPG_FLOAT_finish

#endif


/* continuation for OP_CMPL_DOUBLE */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPL_DOUBLE_gt_or_nan:
    ldmia   r10, {r0-r1}                @ reverse order
    ldmia   r9, {r2-r3}
    bl      __aeabi_cdcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPL_DOUBLE_finish
    mvn     r1, #0                      @ r1<- 1 or -1 for NaN
    b       .LOP_CMPL_DOUBLE_finish


/* continuation for OP_CMPG_DOUBLE */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPG_DOUBLE_gt_or_nan:
    ldmia   r10, {r0-r1}                @ reverse order
    ldmia   r9, {r2-r3}
    bl      __aeabi_cdcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPG_DOUBLE_finish
    mov     r1, #1                      @ r1<- 1 or -1 for NaN
    b       .LOP_CMPG_DOUBLE_finish


/* continuation for OP_CMP_LONG */

.LOP_CMP_LONG_less:
    mvn     r1, #0                      @ r1<- -1
    @ Want to cond code the next mov so we can avoid branch, but don't see it;
    @ instead, we just replicate the tail end.
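    @ (Note: cmp-long stores -1, 0, or 1 into vAA; the "less" path above and
    @ the "greater" path below just materialize that constant before the
    @ shared tail.)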
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LOP_CMP_LONG_greater:
    mov     r1, #1                      @ r1<- 1
    @ fall through to _finish

.LOP_CMP_LONG_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_AGET_WIDE */

.LOP_AGET_WIDE_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r0, r0, #offArrayObject_contents
    ldmia   r0, {r2-r3}                 @ r2/r3 <- vBB[vCC]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_APUT_WIDE */

.LOP_APUT_WIDE_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    add     r0, #offArrayObject_contents
    stmia   r0, {r2-r3}                 @ vBB[vCC] <- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_APUT_OBJECT */
    /*
     * On entry:
     *  r1 = vBB (arrayObj)
     *  r9 = vAA (obj)
     *  r10 = offset into array (vBB + vCC * width)
     */
.LOP_APUT_OBJECT_finish:
    cmp     r9, #0                      @ storing null reference?
    beq     .LOP_APUT_OBJECT_skip_check @ yes, skip type checks
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    ldr     r1, [r1, #offObject_clazz]  @ r1<- arrayObj->clazz
    bl      dvmCanPutArrayElement       @ test object type vs. array type
    cmp     r0, #0                      @ okay?
8647 beq common_errArrayStore @ no 8648.LOP_APUT_OBJECT_skip_check: 8649 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8650 GET_INST_OPCODE(ip) @ extract opcode from rINST 8651 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8652 GOTO_OPCODE(ip) @ jump to next instruction 8653 8654 8655/* continuation for OP_IGET */ 8656 8657 /* 8658 * Currently: 8659 * r0 holds resolved field 8660 * r9 holds object 8661 */ 8662.LOP_IGET_finish: 8663 @bl common_squeak0 8664 cmp r9, #0 @ check object for null 8665 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8666 beq common_errNullObject @ object was null 8667 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8668 mov r2, rINST, lsr #8 @ r2<- A+ 8669 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8670 and r2, r2, #15 @ r2<- A 8671 GET_INST_OPCODE(ip) @ extract opcode from rINST 8672 SET_VREG(r0, r2) @ fp[A]<- r0 8673 GOTO_OPCODE(ip) @ jump to next instruction 8674 8675 8676/* continuation for OP_IGET_WIDE */ 8677 8678 /* 8679 * Currently: 8680 * r0 holds resolved field 8681 * r9 holds object 8682 */ 8683.LOP_IGET_WIDE_finish: 8684 cmp r9, #0 @ check object for null 8685 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8686 beq common_errNullObject @ object was null 8687 mov r2, rINST, lsr #8 @ r2<- A+ 8688 add r9, r9, r3 @ r9<- obj + field offset 8689 ldmia r9, {r0-r1} @ r0/r1<- obj.field (64-bit align ok) 8690 and r2, r2, #15 @ r2<- A 8691 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8692 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8693 GET_INST_OPCODE(ip) @ extract opcode from rINST 8694 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8695 GOTO_OPCODE(ip) @ jump to next instruction 8696 8697 8698/* continuation for OP_IGET_OBJECT */ 8699 8700 /* 8701 * Currently: 8702 * r0 holds resolved field 8703 * r9 holds object 8704 */ 8705.LOP_IGET_OBJECT_finish: 8706 @bl common_squeak0 8707 cmp r9, #0 @ check object for null 8708 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8709 beq common_errNullObject @ object was null 8710 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8711 mov r2, rINST, lsr #8 @ r2<- A+ 8712 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8713 and r2, r2, #15 @ r2<- A 8714 GET_INST_OPCODE(ip) @ extract opcode from rINST 8715 SET_VREG(r0, r2) @ fp[A]<- r0 8716 GOTO_OPCODE(ip) @ jump to next instruction 8717 8718 8719/* continuation for OP_IGET_BOOLEAN */ 8720 8721 /* 8722 * Currently: 8723 * r0 holds resolved field 8724 * r9 holds object 8725 */ 8726.LOP_IGET_BOOLEAN_finish: 8727 @bl common_squeak1 8728 cmp r9, #0 @ check object for null 8729 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8730 beq common_errNullObject @ object was null 8731 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8732 mov r2, rINST, lsr #8 @ r2<- A+ 8733 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8734 and r2, r2, #15 @ r2<- A 8735 GET_INST_OPCODE(ip) @ extract opcode from rINST 8736 SET_VREG(r0, r2) @ fp[A]<- r0 8737 GOTO_OPCODE(ip) @ jump to next instruction 8738 8739 8740/* continuation for OP_IGET_BYTE */ 8741 8742 /* 8743 * Currently: 8744 * r0 holds resolved field 8745 * r9 holds object 8746 */ 8747.LOP_IGET_BYTE_finish: 8748 @bl common_squeak2 8749 cmp r9, #0 @ check object for null 8750 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8751 beq common_errNullObject @ object was null 8752 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8753 mov r2, rINST, lsr #8 @ r2<- A+ 8754 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8755 and r2, r2, #15 @ r2<- A 8756 
GET_INST_OPCODE(ip) @ extract opcode from rINST 8757 SET_VREG(r0, r2) @ fp[A]<- r0 8758 GOTO_OPCODE(ip) @ jump to next instruction 8759 8760 8761/* continuation for OP_IGET_CHAR */ 8762 8763 /* 8764 * Currently: 8765 * r0 holds resolved field 8766 * r9 holds object 8767 */ 8768.LOP_IGET_CHAR_finish: 8769 @bl common_squeak3 8770 cmp r9, #0 @ check object for null 8771 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8772 beq common_errNullObject @ object was null 8773 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8774 mov r2, rINST, lsr #8 @ r2<- A+ 8775 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8776 and r2, r2, #15 @ r2<- A 8777 GET_INST_OPCODE(ip) @ extract opcode from rINST 8778 SET_VREG(r0, r2) @ fp[A]<- r0 8779 GOTO_OPCODE(ip) @ jump to next instruction 8780 8781 8782/* continuation for OP_IGET_SHORT */ 8783 8784 /* 8785 * Currently: 8786 * r0 holds resolved field 8787 * r9 holds object 8788 */ 8789.LOP_IGET_SHORT_finish: 8790 @bl common_squeak4 8791 cmp r9, #0 @ check object for null 8792 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8793 beq common_errNullObject @ object was null 8794 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8795 mov r2, rINST, lsr #8 @ r2<- A+ 8796 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8797 and r2, r2, #15 @ r2<- A 8798 GET_INST_OPCODE(ip) @ extract opcode from rINST 8799 SET_VREG(r0, r2) @ fp[A]<- r0 8800 GOTO_OPCODE(ip) @ jump to next instruction 8801 8802 8803/* continuation for OP_IPUT */ 8804 8805 /* 8806 * Currently: 8807 * r0 holds resolved field 8808 * r9 holds object 8809 */ 8810.LOP_IPUT_finish: 8811 @bl common_squeak0 8812 mov r1, rINST, lsr #8 @ r1<- A+ 8813 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8814 and r1, r1, #15 @ r1<- A 8815 cmp r9, #0 @ check object for null 8816 GET_VREG(r0, r1) @ r0<- fp[A] 8817 beq common_errNullObject @ object was null 8818 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8819 GET_INST_OPCODE(ip) @ extract opcode from rINST 8820 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8821 GOTO_OPCODE(ip) @ jump to next instruction 8822 8823 8824/* continuation for OP_IPUT_WIDE */ 8825 8826 /* 8827 * Currently: 8828 * r0 holds resolved field 8829 * r9 holds object 8830 */ 8831.LOP_IPUT_WIDE_finish: 8832 mov r2, rINST, lsr #8 @ r2<- A+ 8833 cmp r9, #0 @ check object for null 8834 and r2, r2, #15 @ r2<- A 8835 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8836 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8837 beq common_errNullObject @ object was null 8838 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8839 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8840 GET_INST_OPCODE(ip) @ extract opcode from rINST 8841 add r9, r9, r3 @ r9<- object + byte offset 8842 stmia r9, {r0-r1} @ obj.field (64 bits, aligned)<- r0/r1 8843 GOTO_OPCODE(ip) @ jump to next instruction 8844 8845 8846/* continuation for OP_IPUT_OBJECT */ 8847 8848 /* 8849 * Currently: 8850 * r0 holds resolved field 8851 * r9 holds object 8852 */ 8853.LOP_IPUT_OBJECT_finish: 8854 @bl common_squeak0 8855 mov r1, rINST, lsr #8 @ r1<- A+ 8856 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8857 and r1, r1, #15 @ r1<- A 8858 cmp r9, #0 @ check object for null 8859 GET_VREG(r0, r1) @ r0<- fp[A] 8860 beq common_errNullObject @ object was null 8861 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8862 GET_INST_OPCODE(ip) @ extract opcode from rINST 8863 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8864 GOTO_OPCODE(ip) @ jump to next instruction 8865 8866 8867/* 
continuation for OP_IPUT_BOOLEAN */ 8868 8869 /* 8870 * Currently: 8871 * r0 holds resolved field 8872 * r9 holds object 8873 */ 8874.LOP_IPUT_BOOLEAN_finish: 8875 @bl common_squeak1 8876 mov r1, rINST, lsr #8 @ r1<- A+ 8877 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8878 and r1, r1, #15 @ r1<- A 8879 cmp r9, #0 @ check object for null 8880 GET_VREG(r0, r1) @ r0<- fp[A] 8881 beq common_errNullObject @ object was null 8882 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8883 GET_INST_OPCODE(ip) @ extract opcode from rINST 8884 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8885 GOTO_OPCODE(ip) @ jump to next instruction 8886 8887 8888/* continuation for OP_IPUT_BYTE */ 8889 8890 /* 8891 * Currently: 8892 * r0 holds resolved field 8893 * r9 holds object 8894 */ 8895.LOP_IPUT_BYTE_finish: 8896 @bl common_squeak2 8897 mov r1, rINST, lsr #8 @ r1<- A+ 8898 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8899 and r1, r1, #15 @ r1<- A 8900 cmp r9, #0 @ check object for null 8901 GET_VREG(r0, r1) @ r0<- fp[A] 8902 beq common_errNullObject @ object was null 8903 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8904 GET_INST_OPCODE(ip) @ extract opcode from rINST 8905 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8906 GOTO_OPCODE(ip) @ jump to next instruction 8907 8908 8909/* continuation for OP_IPUT_CHAR */ 8910 8911 /* 8912 * Currently: 8913 * r0 holds resolved field 8914 * r9 holds object 8915 */ 8916.LOP_IPUT_CHAR_finish: 8917 @bl common_squeak3 8918 mov r1, rINST, lsr #8 @ r1<- A+ 8919 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8920 and r1, r1, #15 @ r1<- A 8921 cmp r9, #0 @ check object for null 8922 GET_VREG(r0, r1) @ r0<- fp[A] 8923 beq common_errNullObject @ object was null 8924 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8925 GET_INST_OPCODE(ip) @ extract opcode from rINST 8926 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8927 GOTO_OPCODE(ip) @ jump to next instruction 8928 8929 8930/* continuation for OP_IPUT_SHORT */ 8931 8932 /* 8933 * Currently: 8934 * r0 holds resolved field 8935 * r9 holds object 8936 */ 8937.LOP_IPUT_SHORT_finish: 8938 @bl common_squeak4 8939 mov r1, rINST, lsr #8 @ r1<- A+ 8940 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8941 and r1, r1, #15 @ r1<- A 8942 cmp r9, #0 @ check object for null 8943 GET_VREG(r0, r1) @ r0<- fp[A] 8944 beq common_errNullObject @ object was null 8945 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8946 GET_INST_OPCODE(ip) @ extract opcode from rINST 8947 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8948 GOTO_OPCODE(ip) @ jump to next instruction 8949 8950 8951/* continuation for OP_SGET */ 8952 8953 /* 8954 * Continuation if the field has not yet been resolved. 8955 * r1: BBBB field ref 8956 */ 8957.LOP_SGET_resolve: 8958 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8959 EXPORT_PC() @ resolve() could throw, so export now 8960 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8961 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8962 cmp r0, #0 @ success? 8963 bne .LOP_SGET_finish @ yes, finish 8964 b common_exceptionThrown @ no, handle exception 8965 8966 8967/* continuation for OP_SGET_WIDE */ 8968 8969 /* 8970 * Continuation if the field has not yet been resolved. 
8971 * r1: BBBB field ref 8972 */ 8973.LOP_SGET_WIDE_resolve: 8974 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8975 EXPORT_PC() @ resolve() could throw, so export now 8976 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8977 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8978 cmp r0, #0 @ success? 8979 bne .LOP_SGET_WIDE_finish @ yes, finish 8980 b common_exceptionThrown @ no, handle exception 8981 8982 8983/* continuation for OP_SGET_OBJECT */ 8984 8985 /* 8986 * Continuation if the field has not yet been resolved. 8987 * r1: BBBB field ref 8988 */ 8989.LOP_SGET_OBJECT_resolve: 8990 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8991 EXPORT_PC() @ resolve() could throw, so export now 8992 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8993 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8994 cmp r0, #0 @ success? 8995 bne .LOP_SGET_OBJECT_finish @ yes, finish 8996 b common_exceptionThrown @ no, handle exception 8997 8998 8999/* continuation for OP_SGET_BOOLEAN */ 9000 9001 /* 9002 * Continuation if the field has not yet been resolved. 9003 * r1: BBBB field ref 9004 */ 9005.LOP_SGET_BOOLEAN_resolve: 9006 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9007 EXPORT_PC() @ resolve() could throw, so export now 9008 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9009 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9010 cmp r0, #0 @ success? 9011 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 9012 b common_exceptionThrown @ no, handle exception 9013 9014 9015/* continuation for OP_SGET_BYTE */ 9016 9017 /* 9018 * Continuation if the field has not yet been resolved. 9019 * r1: BBBB field ref 9020 */ 9021.LOP_SGET_BYTE_resolve: 9022 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9023 EXPORT_PC() @ resolve() could throw, so export now 9024 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9025 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9026 cmp r0, #0 @ success? 9027 bne .LOP_SGET_BYTE_finish @ yes, finish 9028 b common_exceptionThrown @ no, handle exception 9029 9030 9031/* continuation for OP_SGET_CHAR */ 9032 9033 /* 9034 * Continuation if the field has not yet been resolved. 9035 * r1: BBBB field ref 9036 */ 9037.LOP_SGET_CHAR_resolve: 9038 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9039 EXPORT_PC() @ resolve() could throw, so export now 9040 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9041 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9042 cmp r0, #0 @ success? 9043 bne .LOP_SGET_CHAR_finish @ yes, finish 9044 b common_exceptionThrown @ no, handle exception 9045 9046 9047/* continuation for OP_SGET_SHORT */ 9048 9049 /* 9050 * Continuation if the field has not yet been resolved. 9051 * r1: BBBB field ref 9052 */ 9053.LOP_SGET_SHORT_resolve: 9054 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9055 EXPORT_PC() @ resolve() could throw, so export now 9056 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9057 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9058 cmp r0, #0 @ success? 9059 bne .LOP_SGET_SHORT_finish @ yes, finish 9060 b common_exceptionThrown @ no, handle exception 9061 9062 9063/* continuation for OP_SPUT */ 9064 9065 /* 9066 * Continuation if the field has not yet been resolved. 
9067 * r1: BBBB field ref 9068 */ 9069.LOP_SPUT_resolve: 9070 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9071 EXPORT_PC() @ resolve() could throw, so export now 9072 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9073 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9074 cmp r0, #0 @ success? 9075 bne .LOP_SPUT_finish @ yes, finish 9076 b common_exceptionThrown @ no, handle exception 9077 9078 9079/* continuation for OP_SPUT_WIDE */ 9080 9081 /* 9082 * Continuation if the field has not yet been resolved. 9083 * r1: BBBB field ref 9084 * r9: &fp[AA] 9085 */ 9086.LOP_SPUT_WIDE_resolve: 9087 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9088 EXPORT_PC() @ resolve() could throw, so export now 9089 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9090 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9091 cmp r0, #0 @ success? 9092 bne .LOP_SPUT_WIDE_finish @ yes, finish 9093 b common_exceptionThrown @ no, handle exception 9094 9095 9096/* continuation for OP_SPUT_OBJECT */ 9097 9098 /* 9099 * Continuation if the field has not yet been resolved. 9100 * r1: BBBB field ref 9101 */ 9102.LOP_SPUT_OBJECT_resolve: 9103 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9104 EXPORT_PC() @ resolve() could throw, so export now 9105 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9106 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9107 cmp r0, #0 @ success? 9108 bne .LOP_SPUT_OBJECT_finish @ yes, finish 9109 b common_exceptionThrown @ no, handle exception 9110 9111 9112/* continuation for OP_SPUT_BOOLEAN */ 9113 9114 /* 9115 * Continuation if the field has not yet been resolved. 9116 * r1: BBBB field ref 9117 */ 9118.LOP_SPUT_BOOLEAN_resolve: 9119 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9120 EXPORT_PC() @ resolve() could throw, so export now 9121 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9122 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9123 cmp r0, #0 @ success? 9124 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 9125 b common_exceptionThrown @ no, handle exception 9126 9127 9128/* continuation for OP_SPUT_BYTE */ 9129 9130 /* 9131 * Continuation if the field has not yet been resolved. 9132 * r1: BBBB field ref 9133 */ 9134.LOP_SPUT_BYTE_resolve: 9135 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9136 EXPORT_PC() @ resolve() could throw, so export now 9137 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9138 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9139 cmp r0, #0 @ success? 9140 bne .LOP_SPUT_BYTE_finish @ yes, finish 9141 b common_exceptionThrown @ no, handle exception 9142 9143 9144/* continuation for OP_SPUT_CHAR */ 9145 9146 /* 9147 * Continuation if the field has not yet been resolved. 9148 * r1: BBBB field ref 9149 */ 9150.LOP_SPUT_CHAR_resolve: 9151 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9152 EXPORT_PC() @ resolve() could throw, so export now 9153 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9154 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9155 cmp r0, #0 @ success? 9156 bne .LOP_SPUT_CHAR_finish @ yes, finish 9157 b common_exceptionThrown @ no, handle exception 9158 9159 9160/* continuation for OP_SPUT_SHORT */ 9161 9162 /* 9163 * Continuation if the field has not yet been resolved. 
9164 * r1: BBBB field ref 9165 */ 9166.LOP_SPUT_SHORT_resolve: 9167 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9168 EXPORT_PC() @ resolve() could throw, so export now 9169 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9170 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9171 cmp r0, #0 @ success? 9172 bne .LOP_SPUT_SHORT_finish @ yes, finish 9173 b common_exceptionThrown @ no, handle exception 9174 9175 9176/* continuation for OP_INVOKE_VIRTUAL */ 9177 9178 /* 9179 * At this point: 9180 * r0 = resolved base method 9181 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9182 */ 9183.LOP_INVOKE_VIRTUAL_continue: 9184 GET_VREG(r1, r10) @ r1<- "this" ptr 9185 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9186 cmp r1, #0 @ is "this" null? 9187 beq common_errNullObject @ null "this", throw exception 9188 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9189 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9190 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9191 bl common_invokeMethodNoRange @ continue on 9192 9193 9194/* continuation for OP_INVOKE_SUPER */ 9195 9196 /* 9197 * At this point: 9198 * r0 = resolved base method 9199 * r9 = method->clazz 9200 */ 9201.LOP_INVOKE_SUPER_continue: 9202 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9203 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9204 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9205 EXPORT_PC() @ must export for invoke 9206 cmp r2, r3 @ compare (methodIndex, vtableCount) 9207 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass 9208 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9209 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9210 bl common_invokeMethodNoRange @ continue on 9211 9212.LOP_INVOKE_SUPER_resolve: 9213 mov r0, r9 @ r0<- method->clazz 9214 mov r2, #METHOD_VIRTUAL @ resolver method type 9215 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9216 cmp r0, #0 @ got null? 9217 bne .LOP_INVOKE_SUPER_continue @ no, continue 9218 b common_exceptionThrown @ yes, handle exception 9219 9220 /* 9221 * Throw a NoSuchMethodError with the method name as the message. 9222 * r0 = resolved base method 9223 */ 9224.LOP_INVOKE_SUPER_nsm: 9225 ldr r1, [r0, #offMethod_name] @ r1<- method name 9226 b common_errNoSuchMethod 9227 9228 9229/* continuation for OP_INVOKE_DIRECT */ 9230 9231 /* 9232 * On entry: 9233 * r1 = reference (BBBB or CCCC) 9234 * r10 = "this" register 9235 */ 9236.LOP_INVOKE_DIRECT_resolve: 9237 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9238 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9239 mov r2, #METHOD_DIRECT @ resolver method type 9240 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9241 cmp r0, #0 @ got null? 9242 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9243 bne .LOP_INVOKE_DIRECT_finish @ no, continue 9244 b common_exceptionThrown @ yes, handle exception 9245 9246 9247/* continuation for OP_INVOKE_VIRTUAL_RANGE */ 9248 9249 /* 9250 * At this point: 9251 * r0 = resolved base method 9252 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9253 */ 9254.LOP_INVOKE_VIRTUAL_RANGE_continue: 9255 GET_VREG(r1, r10) @ r1<- "this" ptr 9256 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9257 cmp r1, #0 @ is "this" null? 
9258 beq common_errNullObject @ null "this", throw exception 9259 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9260 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9261 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9262 bl common_invokeMethodRange @ continue on 9263 9264 9265/* continuation for OP_INVOKE_SUPER_RANGE */ 9266 9267 /* 9268 * At this point: 9269 * r0 = resolved base method 9270 * r9 = method->clazz 9271 */ 9272.LOP_INVOKE_SUPER_RANGE_continue: 9273 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9274 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9275 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9276 EXPORT_PC() @ must export for invoke 9277 cmp r2, r3 @ compare (methodIndex, vtableCount) 9278 bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass 9279 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9280 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9281 bl common_invokeMethodRange @ continue on 9282 9283.LOP_INVOKE_SUPER_RANGE_resolve: 9284 mov r0, r9 @ r0<- method->clazz 9285 mov r2, #METHOD_VIRTUAL @ resolver method type 9286 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9287 cmp r0, #0 @ got null? 9288 bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue 9289 b common_exceptionThrown @ yes, handle exception 9290 9291 /* 9292 * Throw a NoSuchMethodError with the method name as the message. 9293 * r0 = resolved base method 9294 */ 9295.LOP_INVOKE_SUPER_RANGE_nsm: 9296 ldr r1, [r0, #offMethod_name] @ r1<- method name 9297 b common_errNoSuchMethod 9298 9299 9300/* continuation for OP_INVOKE_DIRECT_RANGE */ 9301 9302 /* 9303 * On entry: 9304 * r1 = reference (BBBB or CCCC) 9305 * r10 = "this" register 9306 */ 9307.LOP_INVOKE_DIRECT_RANGE_resolve: 9308 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9309 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9310 mov r2, #METHOD_DIRECT @ resolver method type 9311 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9312 cmp r0, #0 @ got null? 9313 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9314 bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue 9315 b common_exceptionThrown @ yes, handle exception 9316 9317 9318/* continuation for OP_FLOAT_TO_LONG */ 9319/* 9320 * Convert the float in r0 to a long in r0/r1. 9321 * 9322 * We have to clip values to long min/max per the specification. The 9323 * expected common case is a "reasonable" value that converts directly 9324 * to modest integer. The EABI convert function isn't doing this for us. 9325 */ 9326f2l_doconv: 9327 stmfd sp!, {r4, lr} 9328 mov r1, #0x5f000000 @ (float)maxlong 9329 mov r4, r0 9330 bl __aeabi_fcmpge @ is arg >= maxlong? 9331 cmp r0, #0 @ nonzero == yes 9332 mvnne r0, #0 @ return maxlong (7fffffff) 9333 mvnne r1, #0x80000000 9334 ldmnefd sp!, {r4, pc} 9335 9336 mov r0, r4 @ recover arg 9337 mov r1, #0xdf000000 @ (float)minlong 9338 bl __aeabi_fcmple @ is arg <= minlong? 9339 cmp r0, #0 @ nonzero == yes 9340 movne r0, #0 @ return minlong (80000000) 9341 movne r1, #0x80000000 9342 ldmnefd sp!, {r4, pc} 9343 9344 mov r0, r4 @ recover arg 9345 mov r1, r4 9346 bl __aeabi_fcmpeq @ is arg == self? 9347 cmp r0, #0 @ zero == no 9348 moveq r1, #0 @ return zero for NaN 9349 ldmeqfd sp!, {r4, pc} 9350 9351 mov r0, r4 @ recover arg 9352 bl __aeabi_f2lz @ convert float to long 9353 ldmfd sp!, {r4, pc} 9354 9355 9356/* continuation for OP_DOUBLE_TO_LONG */ 9357/* 9358 * Convert the double in r0/r1 to a long in r0/r1. 
9359 * 9360 * We have to clip values to long min/max per the specification. The 9361 * expected common case is a "reasonable" value that converts directly 9362 * to modest integer. The EABI convert function isn't doing this for us. 9363 */ 9364d2l_doconv: 9365 stmfd sp!, {r4, r5, lr} @ save regs 9366 mov r3, #0x43000000 @ maxlong, as a double (high word) 9367 add r3, #0x00e00000 @ 0x43e00000 9368 mov r2, #0 @ maxlong, as a double (low word) 9369 sub sp, sp, #4 @ align for EABI 9370 mov r4, r0 @ save a copy of r0 9371 mov r5, r1 @ and r1 9372 bl __aeabi_dcmpge @ is arg >= maxlong? 9373 cmp r0, #0 @ nonzero == yes 9374 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 9375 mvnne r1, #0x80000000 9376 bne 1f 9377 9378 mov r0, r4 @ recover arg 9379 mov r1, r5 9380 mov r3, #0xc3000000 @ minlong, as a double (high word) 9381 add r3, #0x00e00000 @ 0xc3e00000 9382 mov r2, #0 @ minlong, as a double (low word) 9383 bl __aeabi_dcmple @ is arg <= minlong? 9384 cmp r0, #0 @ nonzero == yes 9385 movne r0, #0 @ return minlong (8000000000000000) 9386 movne r1, #0x80000000 9387 bne 1f 9388 9389 mov r0, r4 @ recover arg 9390 mov r1, r5 9391 mov r2, r4 @ compare against self 9392 mov r3, r5 9393 bl __aeabi_dcmpeq @ is arg == self? 9394 cmp r0, #0 @ zero == no 9395 moveq r1, #0 @ return zero for NaN 9396 beq 1f 9397 9398 mov r0, r4 @ recover arg 9399 mov r1, r5 9400 bl __aeabi_d2lz @ convert double to long 9401 94021: 9403 add sp, sp, #4 9404 ldmfd sp!, {r4, r5, pc} 9405 9406 9407/* continuation for OP_MUL_LONG */ 9408 9409.LOP_MUL_LONG_finish: 9410 GET_INST_OPCODE(ip) @ extract opcode from rINST 9411 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 9412 GOTO_OPCODE(ip) @ jump to next instruction 9413 9414 9415/* continuation for OP_SHL_LONG */ 9416 9417.LOP_SHL_LONG_finish: 9418 mov r0, r0, asl r2 @ r0<- r0 << r2 9419 GET_INST_OPCODE(ip) @ extract opcode from rINST 9420 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9421 GOTO_OPCODE(ip) @ jump to next instruction 9422 9423 9424/* continuation for OP_SHR_LONG */ 9425 9426.LOP_SHR_LONG_finish: 9427 mov r1, r1, asr r2 @ r1<- r1 >> r2 9428 GET_INST_OPCODE(ip) @ extract opcode from rINST 9429 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9430 GOTO_OPCODE(ip) @ jump to next instruction 9431 9432 9433/* continuation for OP_USHR_LONG */ 9434 9435.LOP_USHR_LONG_finish: 9436 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 9437 GET_INST_OPCODE(ip) @ extract opcode from rINST 9438 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9439 GOTO_OPCODE(ip) @ jump to next instruction 9440 9441 9442/* continuation for OP_SHL_LONG_2ADDR */ 9443 9444.LOP_SHL_LONG_2ADDR_finish: 9445 GET_INST_OPCODE(ip) @ extract opcode from rINST 9446 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9447 GOTO_OPCODE(ip) @ jump to next instruction 9448 9449 9450/* continuation for OP_SHR_LONG_2ADDR */ 9451 9452.LOP_SHR_LONG_2ADDR_finish: 9453 GET_INST_OPCODE(ip) @ extract opcode from rINST 9454 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9455 GOTO_OPCODE(ip) @ jump to next instruction 9456 9457 9458/* continuation for OP_USHR_LONG_2ADDR */ 9459 9460.LOP_USHR_LONG_2ADDR_finish: 9461 GET_INST_OPCODE(ip) @ extract opcode from rINST 9462 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9463 GOTO_OPCODE(ip) @ jump to next instruction 9464 9465 9466/* continuation for OP_EXECUTE_INLINE */ 9467 9468 /* 9469 * Extract args, call function. 9470 * r0 = #of args (0-4) 9471 * r10 = call index 9472 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9473 * 9474 * Other ideas: 9475 * - Use a jump table from the main piece to jump directly into the 9476 * AND/LDR pairs. 
Costs a data load, saves a branch. 9477 * - Have five separate pieces that do the loading, so we can work the 9478 * interleave a little better. Increases code size. 9479 */ 9480.LOP_EXECUTE_INLINE_continue: 9481 rsb r0, r0, #4 @ r0<- 4-r0 9482 FETCH(r9, 2) @ r9<- FEDC 9483 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9484 bl common_abort @ (skipped due to ARM prefetch) 94854: and ip, r9, #0xf000 @ isolate F 9486 ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) 94873: and ip, r9, #0x0f00 @ isolate E 9488 ldr r2, [rFP, ip, lsr #6] @ r2<- vE 94892: and ip, r9, #0x00f0 @ isolate D 9490 ldr r1, [rFP, ip, lsr #2] @ r1<- vD 94911: and ip, r9, #0x000f @ isolate C 9492 ldr r0, [rFP, ip, lsl #2] @ r0<- vC 94930: 9494 ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation 9495 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9496 @ (not reached) 9497 9498.LOP_EXECUTE_INLINE_table: 9499 .word gDvmInlineOpsTable 9500 9501 9502/* continuation for OP_EXECUTE_INLINE_RANGE */ 9503 9504 /* 9505 * Extract args, call function. 9506 * r0 = #of args (0-4) 9507 * r10 = call index 9508 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9509 */ 9510.LOP_EXECUTE_INLINE_RANGE_continue: 9511 rsb r0, r0, #4 @ r0<- 4-r0 9512 FETCH(r9, 2) @ r9<- CCCC 9513 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9514 bl common_abort @ (skipped due to ARM prefetch) 95154: add ip, r9, #3 @ base+3 9516 GET_VREG(r3, ip) @ r3<- vBase[3] 95173: add ip, r9, #2 @ base+2 9518 GET_VREG(r2, ip) @ r2<- vBase[2] 95192: add ip, r9, #1 @ base+1 9520 GET_VREG(r1, ip) @ r1<- vBase[1] 95211: add ip, r9, #0 @ (nop) 9522 GET_VREG(r0, ip) @ r0<- vBase[0] 95230: 9524 ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation 9525 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9526 @ (not reached) 9527 9528.LOP_EXECUTE_INLINE_RANGE_table: 9529 .word gDvmInlineOpsTable 9530 9531 9532 .size dvmAsmSisterStart, .-dvmAsmSisterStart 9533 .global dvmAsmSisterEnd 9534dvmAsmSisterEnd: 9535 9536/* File: armv5te/footer.S */ 9537 9538/* 9539 * =========================================================================== 9540 * Common subroutines and data 9541 * =========================================================================== 9542 */ 9543 9544 9545 9546 .text 9547 .align 2 9548 9549#if defined(WITH_JIT) 9550#if defined(WITH_SELF_VERIFICATION) 9551 .global dvmJitToInterpPunt 9552dvmJitToInterpPunt: 9553 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9554 mov r2,#kSVSPunt @ r2<- interpreter entry point 9555 mov r3, #0 9556 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9557 b jitSVShadowRunEnd @ doesn't return 9558 9559 .global dvmJitToInterpSingleStep 9560dvmJitToInterpSingleStep: 9561 str lr,[rGLUE,#offGlue_jitResumeNPC] 9562 str r1,[rGLUE,#offGlue_jitResumeDPC] 9563 mov r2,#kSVSSingleStep @ r2<- interpreter entry point 9564 b jitSVShadowRunEnd @ doesn't return 9565 9566 .global dvmJitToInterpTraceSelectNoChain 9567dvmJitToInterpTraceSelectNoChain: 9568 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9569 mov r0,rPC @ pass our target PC 9570 mov r2,#kSVSTraceSelectNoChain @ r2<- interpreter entry point 9571 mov r3, #0 9572 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9573 b jitSVShadowRunEnd @ doesn't return 9574 9575 .global dvmJitToInterpTraceSelect 9576dvmJitToInterpTraceSelect: 9577 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9578 ldr r0,[lr, #-1] @ pass our target PC 
    mov     r2,#kSVSTraceSelect         @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return

    .global dvmJitToInterpBackwardBranch
dvmJitToInterpBackwardBranch:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    ldr     r0,[lr, #-1]                @ pass our target PC
    mov     r2,#kSVSBackwardBranch      @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return

    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    ldr     r0,[lr, #-1]                @ pass our target PC
    mov     r2,#kSVSNormal              @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return

    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     r0,rPC                      @ pass our target PC
    mov     r2,#kSVSNoChain             @ r2<- interpreter entry point
    mov     r3, #0
    str     r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b       jitSVShadowRunEnd           @ doesn't return
#else
/*
 * Return from the translation cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction. We have to skip
 * the code cache lookup otherwise it is possible to indefinitely bounce
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     rPC, r0
#ifdef JIT_STATS
    mov     r0,lr
    bl      dvmBumpPunt
#endif
    EXPORT_PC()
    mov     r0, #0
    str     r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    adrl    rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *   r0 <= PC
 *   r1 <= PC of resume instruction
 *   lr <= resume point in translation
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str     lr,[rGLUE,#offGlue_jitResumeNPC]
    str     r1,[rGLUE,#offGlue_jitResumeDPC]
    mov     r1,#kInterpEntryInstr
    @ enum is 4 byte in aapcs-EABI
    str     r1, [rGLUE, #offGlue_entryPoint]
    mov     rPC,r0
    EXPORT_PC()

    adrl    rIBASE, dvmAsmInstructionStart
    mov     r2,#kJitSingleStep          @ Ask for single step and then revert
    str     r2,[rGLUE,#offGlue_jitState]
    mov     r1,#1                       @ set changeInterp to bail to debug interp
    b       common_gotoBail

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used for callees.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
#ifdef JIT_STATS
    bl      dvmBumpNoChain
#endif
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ Is there a translation?
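    @ (r0 now holds the translation's entry point, or 0 if none exists)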
    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0, #0
    bxne    r0                          @ continue native execution if so
    b       2f

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.
 */
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr     rPC, [lr, #-1]              @ get our target PC
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    add     rINST, lr, #-5              @ save start of chain branch
    mov     r0, rPC
    bl      dvmJitGetCodeAddr           @ Is there a translation?
    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp     r0, #0
    beq     2f
    mov     r1, rINST
    bl      dvmJitChain                 @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0, #0                      @ successful chain?
    bxne    r0                          @ continue native execution
    b       toInterpreter               @ didn't chain - resume with interpreter

/* No translation, so request one if profiling isn't disabled */
2:
    adrl    rIBASE, dvmAsmInstructionStart
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp     r0, #0
    movne   r2, #kJitTSelectRequestHot  @ ask for trace selection
    bne     common_selectTrace
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return from the translation cache to the interpreter.
 * The return was done with a BLX from thumb mode, and
 * the following 32-bit word contains the target rPC value.
 * Note that lr (r14) will have its low-order bit set to denote
 * its thumb-mode origin.
 *
 * We'll need to stash our lr origin away, recover the new
 * target and then check to see if there is a translation available
 * for our new target.  If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr     rPC, [lr, #-1]              @ get our target PC
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    add     rINST, lr, #-5              @ save start of chain branch
#ifdef JIT_STATS
    bl      dvmBumpNormal
#endif
    mov     r0, rPC
    bl      dvmJitGetCodeAddr           @ Is there a translation?
    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp     r0, #0
    beq     toInterpreter               @ go if not, otherwise do chain
    mov     r1, rINST
    bl      dvmJitChain                 @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0, #0                      @ successful chain?
    bxne    r0                          @ continue native execution
    b       toInterpreter               @ didn't chain - resume with interpreter

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
#ifdef JIT_STATS
    bl      dvmBumpNoChain
#endif
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    mov     r0, rPC
    bl      dvmJitGetCodeAddr           @ Is there a translation?
    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0, #0
    bxne    r0                          @ continue native execution if so
#endif

/*
 * No translation, restore interpreter regs and start interpreting.
 * rGLUE & rFP were preserved in the translated code, and rPC has
 * already been restored by the time we get here.  We'll need to set
 * up rIBASE & rINST, and load the address of the JitTable into r0.
 */
toInterpreter:
    EXPORT_PC()
    adrl    rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_JIT_PROF_TABLE(r0)
    @ NOTE: intended fallthrough
/*
 * Common code to update potential trace start counter, and initiate
 * a trace-build if appropriate.  On entry, rPC should point to the
 * next instruction to execute, and rINST should be already loaded with
 * the next opcode word, and r0 holds a pointer to the jit profile
 * table (pJitProfTable).
 */
common_testUpdateProfile:
    cmp     r0, #0
    GET_INST_OPCODE(ip)
    GOTO_OPCODE_IFEQ(ip)                @ if not profiling, jump to opcode handler; else fall through

common_updateProfile:
    eor     r3, rPC, rPC, lsr #12       @ cheap, but fast hash function
    lsl     r3, r3, #(32 - JIT_PROF_SIZE_LOG_2)         @ shift out excess bits
    ldrb    r1, [r0, r3, lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter
    GET_INST_OPCODE(ip)
    subs    r1, r1, #1                  @ decrement counter
    strb    r1, [r0, r3, lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it
    GOTO_OPCODE_IFNE(ip)                @ counter not yet zero: jump to opcode handler; else fall through

/*
 * Here, we switch to the debug interpreter to request
 * trace selection.  First, though, check to see if there
 * is already a native translation in place (and, if so,
 * jump to it now).
 */
    GET_JIT_THRESHOLD(r1)
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    strb    r1, [r0, r3, lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov     r0, rPC
    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0, #0
#if !defined(WITH_SELF_VERIFICATION)
    bxne    r0                          @ jump to the translation
    mov     r2, #kJitTSelectRequest     @ ask for trace selection
    @ fall-through to common_selectTrace
#else
    moveq   r2, #kJitTSelectRequest     @ ask for trace selection
    beq     common_selectTrace
    /*
     * At this point, we have a target translation.  However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    mov     r10, r0                     @ save target
    bl      dvmCompilerGetInterpretTemplate
    cmp     r0, r10                     @ special case?
    bne     jitSVShadowRunStart         @ set up self verification shadow space
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
    /* no return */
#endif

/*
 * On entry:
 *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
 */
common_selectTrace:
    str     r2, [rGLUE, #offGlue_jitState]
    mov     r2, #kInterpEntryInstr      @ normal entry reason
    str     r2, [rGLUE, #offGlue_entryPoint]
    mov     r1, #1                      @ set changeInterp
    b       common_gotoBail

#if defined(WITH_SELF_VERIFICATION)
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
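 * (The shadow copy is consumed by jitSVShadowRunEnd below, which restores
 * the original values and checks the self-verification state on the way
 * back to the interpreter.)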
 * On entry:
 *    rPC, rFP, rGLUE: the values that they should contain
 *    r10: the address of the target translation.
 */
jitSVShadowRunStart:
    mov     r0, rPC                     @ r0<- program counter
    mov     r1, rFP                     @ r1<- frame pointer
    mov     r2, rGLUE                   @ r2<- InterpState pointer
    mov     r3, r10                     @ r3<- target translation
    bl      dvmSelfVerificationSaveState @ save registers to shadow space
    ldr     rFP, [r0, #offShadowSpace_shadowFP]     @ rFP<- fp in shadow space
    add     rGLUE, r0, #offShadowSpace_interpState  @ rGLUE<- rGLUE in shadow space
    bx      r10                         @ jump to the translation

/*
 * Restore PC, registers, and interpState to original values
 * before jumping back to the interpreter.
 */
jitSVShadowRunEnd:
    mov     r1, rFP                     @ pass ending fp
    bl      dvmSelfVerificationRestoreState @ restore pc and fp values
    ldr     rPC, [r0, #offShadowSpace_startPC]      @ restore PC
    ldr     rFP, [r0, #offShadowSpace_fp]           @ restore FP
    ldr     rGLUE, [r0, #offShadowSpace_glue]       @ restore InterpState
    ldr     r1, [r0, #offShadowSpace_svState]       @ get self verification state
    cmp     r1, #0                      @ check for punt condition
    beq     1f
    mov     r2, #kJitSelfVerification   @ ask for self verification
    str     r2, [rGLUE, #offGlue_jitState]
    mov     r2, #kInterpEntryInstr      @ normal entry reason
    str     r2, [rGLUE, #offGlue_entryPoint]
    mov     r1, #1                      @ set changeInterp
    b       common_gotoBail

1:                                      @ exit to interpreter without check
    EXPORT_PC()
    adrl    rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#endif

#endif

/*
 * Common code when a backward branch is taken.
 *
 * On entry:
 *   r9 is PC adjustment *in bytes*
 */
common_backwardBranch:
    mov     r0, #kInterpEntryInstr
    bl      common_periodicChecks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/*
 * Need to see if the thread needs to be suspended or debugger/profiler
 * activity has begun.
 *
 * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't
 * have to do the second ldr.
 *
 * TODO: reduce this so we're just checking a single location.
 *
 * On entry:
 *  r0 is reentry type, e.g. kInterpEntryInstr
 *  r9 is trampoline PC adjustment *in bytes*
 */
common_periodicChecks:
    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount

    @ speculatively store r0 before it is clobbered by dvmCheckSuspendPending
    str     r0, [rGLUE, #offGlue_entryPoint]

#if defined(WITH_DEBUGGER)
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
#endif
#if defined(WITH_PROFILER)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
#endif

    ldr     r3, [r3]                    @ r3<- suspendCount (int)

#if defined(WITH_DEBUGGER)
    ldrb    r1, [r1]                    @ r1<- debuggerActive (boolean)
#endif
#if defined (WITH_PROFILER)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
#endif

    cmp     r3, #0                      @ suspend pending?
    bne     2f                          @ yes, do full suspension check

#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
    orrs    r1, r1, r2                  @ r1<- r1 | r2
    cmp     r1, #0                      @ debugger attached or profiler started?
# elif defined(WITH_DEBUGGER)
    cmp     r1, #0                      @ debugger attached?
# elif defined(WITH_PROFILER)
    cmp     r2, #0                      @ profiler started?
# endif
    bne     3f                          @ debugger/profiler, switch interp
#endif

    bx      lr                          @ nothing to do, return

2:  @ check suspend
#if defined(WITH_JIT)
    /*
     * Refresh the Jit's cached copy of profile table pointer.  This pointer
     * doubles as the Jit's on/off switch.
     */
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<- &gDvmJit.pJitProfTable
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r3, [r3]                    @ r3<- pJitProfTable
    EXPORT_PC()                         @ need for precise GC
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
#else
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ need for precise GC
#endif
    b       dvmCheckSuspendPending      @ suspend if necessary, then return

3:  @ debugger/profiler enabled, bail out
    add     rPC, rPC, r9                @ update rPC
    mov     r1, #1                      @ "want switch" = true
    b       common_gotoBail


/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 *
 * State registers will be saved to the "glue" area before bailing.
 *
 * On entry:
 *  r1 is "bool changeInterp", indicating if we want to switch to the
 *     other interpreter or just bail all the way out
 */
common_gotoBail:
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r0, rGLUE                   @ r0<- glue ptr
    b       dvmMterpStdBail             @ call(glue, changeInterp)

    @add    r1, r1, #1                  @ using (boolean+1)
    @add    r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
    @bl     _longjmp                    @ does not return
    @bl     common_abort


/*
 * Common code for method invocation with range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodRange:
.LinvokeNewRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC

    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
    @ (very few methods have > 10 args; could unroll for common cases)
    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
1:  ldr     r1, [r3], #4                @ val = *fp++
    subs    r2, r2, #1                  @ count--
    str     r1, [r10], #4               @ *outs++ = val
    bne     1b                          @ ...while count != 0
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    b       .LinvokeArgsDone

/*
 * Common code for method invocation without range.
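 *
 * Up to five argument registers (labelled vD..vG and vA in the code below)
 * are copied into the "outs" area with a computed goto keyed on the
 * argument count, mirroring the range version above.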
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]       @ r3<- methodToCall->outsSize
    beq     .LinvokeArgsDone

    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
.LinvokeNonRange:
    rsb     r2, r2, #5                  @ r2<- 5-r2
    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
5:  and     ip, rINST, #0x0f00          @ isolate A
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vA (shift right 8, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vA
4:  and     ip, r1, #0xf000             @ isolate G
    ldr     r2, [rFP, ip, lsr #10]      @ r2<- vG (shift right 12, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vG
3:  and     ip, r1, #0x0f00             @ isolate F
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vF
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vF
2:  and     ip, r1, #0x00f0             @ isolate E
    ldr     r2, [rFP, ip, lsr #2]       @ r2<- vE
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vE
1:  and     ip, r1, #0x000f             @ isolate D
    ldr     r2, [rFP, ip, lsl #2]       @ r2<- vD
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]      @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]   @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
@   bl      common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                      @ bottom < interpStackEnd?
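    @ (The new frame is carved out below the caller's: the callee's vregs sit
    @  just under the old save area, its StackSaveArea below those, and the
    @  outs below that.  "bottom" is the lowest address the call will touch;
    @  on this downward-growing stack it must not drop below interpStackEnd.)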
    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
    blt     .LstackOverflow             @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE
    bne     .LinvokeNative

    /*
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                    @ r9<- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                     @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]  @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]        @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    mov     rFP, r1                     @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)       @ extract prefetched opcode from r9
    mov     rINST, r9                   @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    cmp     r0, #0
    bne     common_updateProfile
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    mov     rFP, r1                     @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)       @ extract prefetched opcode from r9
    mov     rINST, r9                   @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefCookie] @ newFp->localRefCookie=top
    mov     r9, r3                      @ r9<- glue->self (preserve)

    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFp (points to args)
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type   dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

    @mov    lr, pc                      @ set return addr
    @ldr    pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    LDR_PC_LR "[r2, #offMethod_nativeFunc]"

#if defined(WITH_JIT)
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
    ldr     r1, [r9, #offThread_exception]  @ check for exception
#if defined(WITH_JIT)
    ldr     r3, [r3]                    @ r3<- gDvmJit.pProfTable
#endif
    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
    cmp     r1, #0                      @ null?
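    @ (r1 was loaded from self->exception above; the bne below routes to
    @  common_exceptionThrown if the native method left an exception pending.)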
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
#if defined(WITH_JIT)
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
#endif
    bne     common_exceptionThrown      @ not null, handle exception

    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LstackOverflow:    @ r0=methodToCall
    mov     r1, r0                      @ r1<- methodToCall
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
    bl      dvmHandleStackOverflow
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
#endif


    /*
     * Common code for method invocation, calling through "glue code".
     *
     * TODO: now that we have range and non-range invoke handlers, this
     *       needs to be split into two.  Maybe just create entry points
     *       that set r9 and jump here?
     *
     * On entry:
     *  r0 is "Method* methodToCall", the method we're trying to call
     *  r9 is "bool methodCallRange", indicating if this is a /range variant
     */
    .if     0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rGLUE                   @ A0<- glue
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
    .endif


/*
 * Common code for handling a return instruction.
 *
 * This does not return.
 */
common_returnFromMethod:
.LreturnNew:
    mov     r0, #kInterpEntryReturn
    mov     r9, #0
    bl      common_periodicChecks

    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     rFP, [r0, #offStackSaveArea_prevFrame]  @ fp = saveArea->prevFrame
    ldr     r9, [r0, #offStackSaveArea_savedPc]     @ r9 = saveArea->savedPc
    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ is this a break frame?
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    mov     r1, #0                      @ "want switch" = false
    beq     common_gotoBail             @ break frame, bail out completely

    PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
    str     r2, [rGLUE, #offGlue_method] @ glue->method = newSave->method
    ldr     r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
#if defined(WITH_JIT)
    ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
    GET_JIT_PROF_TABLE(r0)
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    str     r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land
    cmp     r10, #0                     @ caller is compiled code
    blxne   r10
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     r0, #0
    bne     common_updateProfile
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /*
     * Return handling, calls through "glue code".
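     *
     * (Disabled reference path -- the inline code in common_returnFromMethod
     * above is what actually runs; see the ".if 0" below.)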
     */
    .if     0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif


/*
 * Somebody has thrown an exception.  Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
    .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow
    mov     r9, #0
    bl      common_periodicChecks

    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10                     @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    mov     r3, #0                      @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false
    str     ip, [sp, #-4]!              @ *--sp = &fp
    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
    mov     r0, r10                     @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ save relPc result in rFP
    mov     r0, r10                     @ r0<- self
    mov     r1, r9                      @ r1<- exception
    bl      dvmCleanupStackOverflow     @ call(self)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack
    bmi     .LnotCaughtLocally

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method]  @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]        @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]  @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]  @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex]   @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1         @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, r10                     @ if yes: r0<- self
    movne   r1, r9                      @ if yes: r1<- exception
    blne    dvmCleanupStackOverflow     @ if yes: call(self)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
#endif
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    mov     r1, #0                      @ "want switch" = false
    b       common_gotoBail             @ bail out


    /*
     * Exception handling, calls through "glue code".
     */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Invalid array index.
 */
common_errArrayIndex:
    EXPORT_PC()
    ldr     r0, strArrayIndexException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invalid array value.
 */
common_errArrayStore:
    EXPORT_PC()
    ldr     r0, strArrayStoreException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Integer divide or mod by zero.
 */
common_errDivideByZero:
    EXPORT_PC()
    ldr     r0, strArithmeticException
    ldr     r1, strDivideByZero
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Attempt to allocate an array with a negative size.
 */
common_errNegativeArraySize:
    EXPORT_PC()
    ldr     r0, strNegativeArraySizeException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invocation of a non-existent method.
 */
common_errNoSuchMethod:
    EXPORT_PC()
    ldr     r0, strNoSuchMethodError
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * We encountered a null object when we weren't expecting one.  We
 * export the PC, throw a NullPointerException, and goto the exception
 * processing code.
 */
common_errNullObject:
    EXPORT_PC()
    ldr     r0, strNullPointerException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * For debugging, cause an immediate fault.  The source address will
 * be in lr (use a bl instruction to jump here).
 */
common_abort:
    ldr     pc, .LdeadFood
.LdeadFood:
    .word   0xdeadf00d

/*
 * Spit out a "we were here", preserving all registers.  (The attempt
 * to save ip won't work, but we need to save an even number of
 * registers for EABI 64-bit stack alignment.)
 */
    .macro  SQUEAK num
common_squeak\num:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strSqueak
    mov     r1, #\num
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endm

    SQUEAK  0
    SQUEAK  1
    SQUEAK  2
    SQUEAK  3
    SQUEAK  4
    SQUEAK  5

/*
 * Spit out the number in r0, preserving registers.
 */
common_printNum:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strSqueak
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print a newline, preserving registers.
 */
common_printNewline:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strNewline
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

    /*
     * Print the 32-bit quantity in r0 as a hex value, preserving registers.
     */
common_printHex:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strPrintHex
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print the 64-bit quantity in r0-r1, preserving registers.
 */
common_printLong:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r3, r1
    mov     r2, r0
    ldr     r0, strPrintLong
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print full method info.  Pass the Method* in r0.  Preserves regs.
 */
common_printMethod:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpPrintMethod
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Call a C helper function that dumps regs and possibly some
 * additional info.  Requires the C function to be compiled in.
 */
    .if     0
common_dumpRegs:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpDumpArmRegs
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endif

#if 0
/*
 * Experiment on VFP mode.
 *
 *  uint32_t setFPSCR(uint32_t val, uint32_t mask)
 *
 * Updates the bits specified by "mask", setting them to the values in "val".
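 *
 * In C terms the code below is roughly:
 *
 *     fpscr = (fpscr & ~mask) | (val & mask);
 *     return fpscr;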
 */
setFPSCR:
    and     r0, r0, r1                  @ make sure no stray bits are set
    fmrx    r2, fpscr                   @ get VFP reg
    mvn     r1, r1                      @ bit-invert mask
    and     r2, r2, r1                  @ clear masked bits
    orr     r2, r2, r0                  @ set specified bits
    fmxr    fpscr, r2                   @ set VFP reg
    mov     r0, r2                      @ return new value
    bx      lr

    .align  2
    .global dvmConfigureFP
    .type   dvmConfigureFP, %function
dvmConfigureFP:
    stmfd   sp!, {ip, lr}
    /* 0x03000000 sets DN/FZ */
    /* 0x00009f00 clears the six exception enable flags */
    bl      common_squeak0
    mov     r0, #0x03000000             @ r0<- 0x03000000
    add     r1, r0, #0x9f00             @ r1<- 0x03009f00
    bl      setFPSCR
    ldmfd   sp!, {ip, pc}
#endif


/*
 * String references, must be close to the code that uses them.
 */
    .align  2
strArithmeticException:
    .word   .LstrArithmeticException
strArrayIndexException:
    .word   .LstrArrayIndexException
strArrayStoreException:
    .word   .LstrArrayStoreException
strDivideByZero:
    .word   .LstrDivideByZero
strNegativeArraySizeException:
    .word   .LstrNegativeArraySizeException
strNoSuchMethodError:
    .word   .LstrNoSuchMethodError
strNullPointerException:
    .word   .LstrNullPointerException

strLogTag:
    .word   .LstrLogTag
strExceptionNotCaughtLocally:
    .word   .LstrExceptionNotCaughtLocally

strNewline:
    .word   .LstrNewline
strSqueak:
    .word   .LstrSqueak
strPrintHex:
    .word   .LstrPrintHex
strPrintLong:
    .word   .LstrPrintLong

/*
 * Zero-terminated ASCII string data.
 *
 * On ARM we have two choices: do like gcc does, and LDR from a .word
 * with the address, or use an ADR pseudo-op to get the address
 * directly.  ADR saves 4 bytes and an indirection, but it's using a
 * PC-relative addressing mode and hence has a limited range, which
 * makes it not work well with mergeable string sections.
 */
    .section .rodata.str1.4,"aMS",%progbits,1

.LstrBadEntryPoint:
    .asciz  "Bad entry point %d\n"
.LstrArithmeticException:
    .asciz  "Ljava/lang/ArithmeticException;"
.LstrArrayIndexException:
    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
.LstrArrayStoreException:
    .asciz  "Ljava/lang/ArrayStoreException;"
.LstrClassCastException:
    .asciz  "Ljava/lang/ClassCastException;"
.LstrDivideByZero:
    .asciz  "divide by zero"
.LstrFilledNewArrayNotImpl:
    .asciz  "filled-new-array only implemented for objects and 'int'"
.LstrInternalError:
    .asciz  "Ljava/lang/InternalError;"
.LstrInstantiationError:
    .asciz  "Ljava/lang/InstantiationError;"
.LstrNegativeArraySizeException:
    .asciz  "Ljava/lang/NegativeArraySizeException;"
.LstrNoSuchMethodError:
    .asciz  "Ljava/lang/NoSuchMethodError;"
.LstrNullPointerException:
    .asciz  "Ljava/lang/NullPointerException;"

.LstrLogTag:
    .asciz  "mterp"
.LstrExceptionNotCaughtLocally:
    .asciz  "Exception %s from %s:%d not caught locally\n"

.LstrNewline:
    .asciz  "\n"
.LstrSqueak:
    .asciz  "<%d>"
.LstrPrintHex:
    .asciz  "<0x%x>"
.LstrPrintLong:
    .asciz  "<%lld>"