InterpAsm-armv5te.S revision 7365493ad8d360c1dcf9cd8b6eee62747af01cae
/*
 * This file was generated automatically by gen-mterp.py for 'armv5te'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-a3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
/* NOTE: the ldmia/stmia forms rely on pc/fp being the first two fields of
 * MterpGlue (offset 0) -- offGlue_pc/offGlue_fp must be 0 and 4. */
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 */
#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]!

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
        ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]

/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 *
 * Dispatch is a computed goto: each handler is 64 bytes (lsl #6) past
 * rIBASE, matching the ".balign 64" padding of every .L_OP_* handler.
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

#if defined(WITH_JIT)
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
        add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"

#if defined(WITH_JIT)
#include "../common/jit-config.h"
#endif

/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm

/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.
 From this it figures out how to restore
 * the registers, including PC, for the previous stack frame.  If gdb
 * sees a non-function label, it stops scanning, so either we need to
 * have nothing but assembler-local labels between the entry point and
 * the break, or we need to fake it out.
 *
 * When this is defined, we add some stuff to make gdb less confused.
 */
#define ASSIST_DEBUGGER 1

    .text
    .align  2
    .global dvmMterpStdRun
    .type   dvmMterpStdRun, %function

/*
 * On entry:
 *  r0  MterpGlue* glue
 *
 * This function returns a boolean "changeInterp" value.  The return comes
 * via a call to dvmMterpStdBail().
 */
dvmMterpStdRun:
/* Shared prologue: pushes 9 regs (36 bytes) then pads 4 bytes so sp stays
 * 8-byte aligned per EABI.  Also reused below to fake a frame for gdb. */
#define MTERP_ENTRY1 \
    .save {r4-r10,fp,lr}; \
    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
#define MTERP_ENTRY2 \
    .pad    #4; \
    sub     sp, sp, #4                  @ align 64

    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2

    /* save stack pointer, add magic word for debuggerd */
    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return

    /* set up "named" registers, figure out entry point */
    mov     rGLUE, r0                   @ set rGLUE
    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
    cmp     r1, #kInterpEntryInstr      @ usual case?
    bne     .Lnot_instr                 @ no, handle it

#if defined(WITH_JIT)
.LentryInstr:
    ldr    r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    /* Entry is always a possible trace start */
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    mov    r1, #0                      @ prepare the value for the new state
    str    r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
    cmp    r0,#0                       @ profiling active? (non-null table)
    bne    common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    /* start executing the instruction at rPC */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.Lnot_instr:
    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
    beq     common_returnFromMethod

.Lnot_return:
    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
    beq     common_exceptionThrown

#if defined(WITH_JIT)
.Lnot_throw:
    ldr     r10,[rGLUE, #offGlue_jitResumeNPC]
    ldr     r2,[rGLUE, #offGlue_jitResumeDPC]
    cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
    bne     .Lbad_arg
    cmp     rPC,r2
    bne     .LentryInstr                @ must have branched, don't resume
#if defined(WITH_SELF_VERIFICATION)
    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
    b       jitSVShadowRunStart         @ re-enter the translation after the
                                        @ single-stepped instruction
                                        @noreturn
#endif
    mov     r1, #kInterpEntryInstr
    str     r1, [rGLUE, #offGlue_entryPoint]
    bx      r10                         @ re-enter the translation
#endif

.Lbad_arg:
    ldr     r0, strBadEntryPoint
    @ r1 holds value of entryPoint
    bl      printf
    bl      dvmAbort
    .fnend


    .global dvmMterpStdBail
    .type   dvmMterpStdBail, %function

/*
 * Restore the stack pointer and PC from the save point established on entry.
 * This is essentially the same as a longjmp, but should be cheaper.  The
 * last instruction causes us to return to whoever called dvmMterpStdRun.
 *
 * We pushed some registers on the stack in dvmMterpStdRun, then saved
 * SP and LR.  Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  r0  MterpGlue* glue
 *  r1  bool changeInterp
 */
dvmMterpStdBail:
    ldr     sp, [r0, #offGlue_bailPtr]  @ sp<- saved SP
    mov     r0, r1                      @ return the changeInterp value
    add     sp, sp, #4                  @ un-align 64
    LDMFD_PC "r4-r10,fp"                @ restore 9 regs and return


/*
 * String references.
 */
strBadEntryPoint:
    .word   .LstrBadEntryPoint


    .global dvmAsmInstructionStart
    .type   dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
    .text

/* ------------------------------ */
    .balign 64
.L_OP_NOP: /* 0x00 */
/* File: armv5te/OP_NOP.S */
    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    .type   dalvik_inst, %function
dalvik_inst:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
    .fnend
#endif

/* ------------------------------ */
    .balign 64
.L_OP_MOVE: /* 0x01 */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15                 @ r0<- A (mask off opcode bits)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_FROM16: /* 0x02 */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
445 GET_VREG(r2, r1) @ r2<- fp[BBBB] 446 GET_INST_OPCODE(ip) @ extract opcode from rINST 447 SET_VREG(r2, r0) @ fp[AA]<- r2 448 GOTO_OPCODE(ip) @ jump to next instruction 449 450/* ------------------------------ */ 451 .balign 64 452.L_OP_MOVE_16: /* 0x03 */ 453/* File: armv5te/OP_MOVE_16.S */ 454 /* for: move/16, move-object/16 */ 455 /* op vAAAA, vBBBB */ 456 FETCH(r1, 2) @ r1<- BBBB 457 FETCH(r0, 1) @ r0<- AAAA 458 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 459 GET_VREG(r2, r1) @ r2<- fp[BBBB] 460 GET_INST_OPCODE(ip) @ extract opcode from rINST 461 SET_VREG(r2, r0) @ fp[AAAA]<- r2 462 GOTO_OPCODE(ip) @ jump to next instruction 463 464/* ------------------------------ */ 465 .balign 64 466.L_OP_MOVE_WIDE: /* 0x04 */ 467/* File: armv5te/OP_MOVE_WIDE.S */ 468 /* move-wide vA, vB */ 469 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 470 mov r2, rINST, lsr #8 @ r2<- A(+) 471 mov r3, rINST, lsr #12 @ r3<- B 472 and r2, r2, #15 473 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 474 add r2, rFP, r2, lsl #2 @ r2<- &fp[A] 475 ldmia r3, {r0-r1} @ r0/r1<- fp[B] 476 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 477 GET_INST_OPCODE(ip) @ extract opcode from rINST 478 stmia r2, {r0-r1} @ fp[A]<- r0/r1 479 GOTO_OPCODE(ip) @ jump to next instruction 480 481/* ------------------------------ */ 482 .balign 64 483.L_OP_MOVE_WIDE_FROM16: /* 0x05 */ 484/* File: armv5te/OP_MOVE_WIDE_FROM16.S */ 485 /* move-wide/from16 vAA, vBBBB */ 486 /* NOTE: regs can overlap, e.g. 
"move v6,v7" or "move v7,v6" */ 487 FETCH(r3, 1) @ r3<- BBBB 488 mov r2, rINST, lsr #8 @ r2<- AA 489 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 490 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 491 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 492 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 493 GET_INST_OPCODE(ip) @ extract opcode from rINST 494 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 495 GOTO_OPCODE(ip) @ jump to next instruction 496 497/* ------------------------------ */ 498 .balign 64 499.L_OP_MOVE_WIDE_16: /* 0x06 */ 500/* File: armv5te/OP_MOVE_WIDE_16.S */ 501 /* move-wide/16 vAAAA, vBBBB */ 502 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 503 FETCH(r3, 2) @ r3<- BBBB 504 FETCH(r2, 1) @ r2<- AAAA 505 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 506 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA] 507 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 508 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 509 GET_INST_OPCODE(ip) @ extract opcode from rINST 510 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1 511 GOTO_OPCODE(ip) @ jump to next instruction 512 513/* ------------------------------ */ 514 .balign 64 515.L_OP_MOVE_OBJECT: /* 0x07 */ 516/* File: armv5te/OP_MOVE_OBJECT.S */ 517/* File: armv5te/OP_MOVE.S */ 518 /* for move, move-object, long-to-int */ 519 /* op vA, vB */ 520 mov r1, rINST, lsr #12 @ r1<- B from 15:12 521 mov r0, rINST, lsr #8 @ r0<- A from 11:8 522 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 523 GET_VREG(r2, r1) @ r2<- fp[B] 524 and r0, r0, #15 525 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 526 SET_VREG(r2, r0) @ fp[A]<- r2 527 GOTO_OPCODE(ip) @ execute next instruction 528 529 530/* ------------------------------ */ 531 .balign 64 532.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */ 533/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */ 534/* File: armv5te/OP_MOVE_FROM16.S */ 535 /* for: move/from16, move-object/from16 */ 536 /* op vAA, vBBBB */ 537 FETCH(r1, 1) @ r1<- BBBB 538 mov r0, rINST, lsr #8 @ r0<- AA 539 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 540 GET_VREG(r2, r1) @ r2<- 
fp[BBBB] 541 GET_INST_OPCODE(ip) @ extract opcode from rINST 542 SET_VREG(r2, r0) @ fp[AA]<- r2 543 GOTO_OPCODE(ip) @ jump to next instruction 544 545 546/* ------------------------------ */ 547 .balign 64 548.L_OP_MOVE_OBJECT_16: /* 0x09 */ 549/* File: armv5te/OP_MOVE_OBJECT_16.S */ 550/* File: armv5te/OP_MOVE_16.S */ 551 /* for: move/16, move-object/16 */ 552 /* op vAAAA, vBBBB */ 553 FETCH(r1, 2) @ r1<- BBBB 554 FETCH(r0, 1) @ r0<- AAAA 555 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 556 GET_VREG(r2, r1) @ r2<- fp[BBBB] 557 GET_INST_OPCODE(ip) @ extract opcode from rINST 558 SET_VREG(r2, r0) @ fp[AAAA]<- r2 559 GOTO_OPCODE(ip) @ jump to next instruction 560 561 562/* ------------------------------ */ 563 .balign 64 564.L_OP_MOVE_RESULT: /* 0x0a */ 565/* File: armv5te/OP_MOVE_RESULT.S */ 566 /* for: move-result, move-result-object */ 567 /* op vAA */ 568 mov r2, rINST, lsr #8 @ r2<- AA 569 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 570 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 571 GET_INST_OPCODE(ip) @ extract opcode from rINST 572 SET_VREG(r0, r2) @ fp[AA]<- r0 573 GOTO_OPCODE(ip) @ jump to next instruction 574 575/* ------------------------------ */ 576 .balign 64 577.L_OP_MOVE_RESULT_WIDE: /* 0x0b */ 578/* File: armv5te/OP_MOVE_RESULT_WIDE.S */ 579 /* move-result-wide vAA */ 580 mov r2, rINST, lsr #8 @ r2<- AA 581 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 582 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 583 ldmia r3, {r0-r1} @ r0/r1<- retval.j 584 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 585 GET_INST_OPCODE(ip) @ extract opcode from rINST 586 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 587 GOTO_OPCODE(ip) @ jump to next instruction 588 589/* ------------------------------ */ 590 .balign 64 591.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */ 592/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */ 593/* File: armv5te/OP_MOVE_RESULT.S */ 594 /* for: move-result, move-result-object */ 595 /* op vAA */ 596 mov r2, rINST, lsr #8 @ r2<- AA 597 
FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 598 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 599 GET_INST_OPCODE(ip) @ extract opcode from rINST 600 SET_VREG(r0, r2) @ fp[AA]<- r0 601 GOTO_OPCODE(ip) @ jump to next instruction 602 603 604/* ------------------------------ */ 605 .balign 64 606.L_OP_MOVE_EXCEPTION: /* 0x0d */ 607/* File: armv5te/OP_MOVE_EXCEPTION.S */ 608 /* move-exception vAA */ 609 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 610 mov r2, rINST, lsr #8 @ r2<- AA 611 ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass 612 mov r1, #0 @ r1<- 0 613 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 614 SET_VREG(r3, r2) @ fp[AA]<- exception obj 615 GET_INST_OPCODE(ip) @ extract opcode from rINST 616 str r1, [r0, #offThread_exception] @ dvmClearException bypass 617 GOTO_OPCODE(ip) @ jump to next instruction 618 619/* ------------------------------ */ 620 .balign 64 621.L_OP_RETURN_VOID: /* 0x0e */ 622/* File: armv5te/OP_RETURN_VOID.S */ 623 b common_returnFromMethod 624 625/* ------------------------------ */ 626 .balign 64 627.L_OP_RETURN: /* 0x0f */ 628/* File: armv5te/OP_RETURN.S */ 629 /* 630 * Return a 32-bit value. Copies the return value into the "glue" 631 * structure, then jumps to the return handler. 632 * 633 * for: return, return-object 634 */ 635 /* op vAA */ 636 mov r2, rINST, lsr #8 @ r2<- AA 637 GET_VREG(r0, r2) @ r0<- vAA 638 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 639 b common_returnFromMethod 640 641/* ------------------------------ */ 642 .balign 64 643.L_OP_RETURN_WIDE: /* 0x10 */ 644/* File: armv5te/OP_RETURN_WIDE.S */ 645 /* 646 * Return a 64-bit value. Copies the return value into the "glue" 647 * structure, then jumps to the return handler. 
648 */ 649 /* return-wide vAA */ 650 mov r2, rINST, lsr #8 @ r2<- AA 651 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 652 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 653 ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1 654 stmia r3, {r0-r1} @ retval<- r0/r1 655 b common_returnFromMethod 656 657/* ------------------------------ */ 658 .balign 64 659.L_OP_RETURN_OBJECT: /* 0x11 */ 660/* File: armv5te/OP_RETURN_OBJECT.S */ 661/* File: armv5te/OP_RETURN.S */ 662 /* 663 * Return a 32-bit value. Copies the return value into the "glue" 664 * structure, then jumps to the return handler. 665 * 666 * for: return, return-object 667 */ 668 /* op vAA */ 669 mov r2, rINST, lsr #8 @ r2<- AA 670 GET_VREG(r0, r2) @ r0<- vAA 671 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 672 b common_returnFromMethod 673 674 675/* ------------------------------ */ 676 .balign 64 677.L_OP_CONST_4: /* 0x12 */ 678/* File: armv5te/OP_CONST_4.S */ 679 /* const/4 vA, #+B */ 680 mov r1, rINST, lsl #16 @ r1<- Bxxx0000 681 mov r0, rINST, lsr #8 @ r0<- A+ 682 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 683 mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended) 684 and r0, r0, #15 685 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 686 SET_VREG(r1, r0) @ fp[A]<- r1 687 GOTO_OPCODE(ip) @ execute next instruction 688 689/* ------------------------------ */ 690 .balign 64 691.L_OP_CONST_16: /* 0x13 */ 692/* File: armv5te/OP_CONST_16.S */ 693 /* const/16 vAA, #+BBBB */ 694 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 695 mov r3, rINST, lsr #8 @ r3<- AA 696 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 697 SET_VREG(r0, r3) @ vAA<- r0 698 GET_INST_OPCODE(ip) @ extract opcode from rINST 699 GOTO_OPCODE(ip) @ jump to next instruction 700 701/* ------------------------------ */ 702 .balign 64 703.L_OP_CONST: /* 0x14 */ 704/* File: armv5te/OP_CONST.S */ 705 /* const vAA, #+BBBBbbbb */ 706 mov r3, rINST, lsr #8 @ r3<- AA 707 FETCH(r0, 1) @ r0<- bbbb (low) 708 FETCH(r1, 2) @ r1<- BBBB (high) 709 FETCH_ADVANCE_INST(3) @ 
advance rPC, load rINST 710 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 711 GET_INST_OPCODE(ip) @ extract opcode from rINST 712 SET_VREG(r0, r3) @ vAA<- r0 713 GOTO_OPCODE(ip) @ jump to next instruction 714 715/* ------------------------------ */ 716 .balign 64 717.L_OP_CONST_HIGH16: /* 0x15 */ 718/* File: armv5te/OP_CONST_HIGH16.S */ 719 /* const/high16 vAA, #+BBBB0000 */ 720 FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended) 721 mov r3, rINST, lsr #8 @ r3<- AA 722 mov r0, r0, lsl #16 @ r0<- BBBB0000 723 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 724 SET_VREG(r0, r3) @ vAA<- r0 725 GET_INST_OPCODE(ip) @ extract opcode from rINST 726 GOTO_OPCODE(ip) @ jump to next instruction 727 728/* ------------------------------ */ 729 .balign 64 730.L_OP_CONST_WIDE_16: /* 0x16 */ 731/* File: armv5te/OP_CONST_WIDE_16.S */ 732 /* const-wide/16 vAA, #+BBBB */ 733 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 734 mov r3, rINST, lsr #8 @ r3<- AA 735 mov r1, r0, asr #31 @ r1<- ssssssss 736 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 737 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 738 GET_INST_OPCODE(ip) @ extract opcode from rINST 739 stmia r3, {r0-r1} @ vAA<- r0/r1 740 GOTO_OPCODE(ip) @ jump to next instruction 741 742/* ------------------------------ */ 743 .balign 64 744.L_OP_CONST_WIDE_32: /* 0x17 */ 745/* File: armv5te/OP_CONST_WIDE_32.S */ 746 /* const-wide/32 vAA, #+BBBBbbbb */ 747 FETCH(r0, 1) @ r0<- 0000bbbb (low) 748 mov r3, rINST, lsr #8 @ r3<- AA 749 FETCH_S(r2, 2) @ r2<- ssssBBBB (high) 750 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 751 orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb 752 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 753 mov r1, r0, asr #31 @ r1<- ssssssss 754 GET_INST_OPCODE(ip) @ extract opcode from rINST 755 stmia r3, {r0-r1} @ vAA<- r0/r1 756 GOTO_OPCODE(ip) @ jump to next instruction 757 758/* ------------------------------ */ 759 .balign 64 760.L_OP_CONST_WIDE: /* 0x18 */ 761/* File: armv5te/OP_CONST_WIDE.S */ 762 /* const-wide vAA, #+HHHHhhhhBBBBbbbb */ 763 
FETCH(r0, 1) @ r0<- bbbb (low) 764 FETCH(r1, 2) @ r1<- BBBB (low middle) 765 FETCH(r2, 3) @ r2<- hhhh (high middle) 766 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word) 767 FETCH(r3, 4) @ r3<- HHHH (high) 768 mov r9, rINST, lsr #8 @ r9<- AA 769 orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word) 770 FETCH_ADVANCE_INST(5) @ advance rPC, load rINST 771 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 772 GET_INST_OPCODE(ip) @ extract opcode from rINST 773 stmia r9, {r0-r1} @ vAA<- r0/r1 774 GOTO_OPCODE(ip) @ jump to next instruction 775 776/* ------------------------------ */ 777 .balign 64 778.L_OP_CONST_WIDE_HIGH16: /* 0x19 */ 779/* File: armv5te/OP_CONST_WIDE_HIGH16.S */ 780 /* const-wide/high16 vAA, #+BBBB000000000000 */ 781 FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended) 782 mov r3, rINST, lsr #8 @ r3<- AA 783 mov r0, #0 @ r0<- 00000000 784 mov r1, r1, lsl #16 @ r1<- BBBB0000 785 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 786 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 787 GET_INST_OPCODE(ip) @ extract opcode from rINST 788 stmia r3, {r0-r1} @ vAA<- r0/r1 789 GOTO_OPCODE(ip) @ jump to next instruction 790 791/* ------------------------------ */ 792 .balign 64 793.L_OP_CONST_STRING: /* 0x1a */ 794/* File: armv5te/OP_CONST_STRING.S */ 795 /* const/string vAA, String@BBBB */ 796 FETCH(r1, 1) @ r1<- BBBB 797 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 798 mov r9, rINST, lsr #8 @ r9<- AA 799 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 800 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 801 cmp r0, #0 @ not yet resolved? 
802 beq .LOP_CONST_STRING_resolve 803 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 804 GET_INST_OPCODE(ip) @ extract opcode from rINST 805 SET_VREG(r0, r9) @ vAA<- r0 806 GOTO_OPCODE(ip) @ jump to next instruction 807 808/* ------------------------------ */ 809 .balign 64 810.L_OP_CONST_STRING_JUMBO: /* 0x1b */ 811/* File: armv5te/OP_CONST_STRING_JUMBO.S */ 812 /* const/string vAA, String@BBBBBBBB */ 813 FETCH(r0, 1) @ r0<- bbbb (low) 814 FETCH(r1, 2) @ r1<- BBBB (high) 815 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 816 mov r9, rINST, lsr #8 @ r9<- AA 817 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 818 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 819 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 820 cmp r0, #0 821 beq .LOP_CONST_STRING_JUMBO_resolve 822 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 823 GET_INST_OPCODE(ip) @ extract opcode from rINST 824 SET_VREG(r0, r9) @ vAA<- r0 825 GOTO_OPCODE(ip) @ jump to next instruction 826 827/* ------------------------------ */ 828 .balign 64 829.L_OP_CONST_CLASS: /* 0x1c */ 830/* File: armv5te/OP_CONST_CLASS.S */ 831 /* const/class vAA, Class@BBBB */ 832 FETCH(r1, 1) @ r1<- BBBB 833 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 834 mov r9, rINST, lsr #8 @ r9<- AA 835 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses 836 ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB] 837 cmp r0, #0 @ not yet resolved? 838 beq .LOP_CONST_CLASS_resolve 839 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 840 GET_INST_OPCODE(ip) @ extract opcode from rINST 841 SET_VREG(r0, r9) @ vAA<- r0 842 GOTO_OPCODE(ip) @ jump to next instruction 843 844/* ------------------------------ */ 845 .balign 64 846.L_OP_MONITOR_ENTER: /* 0x1d */ 847/* File: armv5te/OP_MONITOR_ENTER.S */ 848 /* 849 * Synchronize on an object. 
850 */ 851 /* monitor-enter vAA */ 852 mov r2, rINST, lsr #8 @ r2<- AA 853 GET_VREG(r1, r2) @ r1<- vAA (object) 854 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 855 cmp r1, #0 @ null object? 856 EXPORT_PC() @ need for precise GC, MONITOR_TRACKING 857 beq common_errNullObject @ null object, throw an exception 858 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 859 bl dvmLockObject @ call(self, obj) 860#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */ 861 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 862 ldr r1, [r0, #offThread_exception] @ check for exception 863 cmp r1, #0 864 bne common_exceptionThrown @ exception raised, bail out 865#endif 866 GET_INST_OPCODE(ip) @ extract opcode from rINST 867 GOTO_OPCODE(ip) @ jump to next instruction 868 869/* ------------------------------ */ 870 .balign 64 871.L_OP_MONITOR_EXIT: /* 0x1e */ 872/* File: armv5te/OP_MONITOR_EXIT.S */ 873 /* 874 * Unlock an object. 875 * 876 * Exceptions that occur when unlocking a monitor need to appear as 877 * if they happened at the following instruction. See the Dalvik 878 * instruction spec. 879 */ 880 /* monitor-exit vAA */ 881 mov r2, rINST, lsr #8 @ r2<- AA 882 EXPORT_PC() @ before fetch: export the PC 883 GET_VREG(r1, r2) @ r1<- vAA (object) 884 cmp r1, #0 @ null object? 885 beq 1f @ yes 886 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 887 bl dvmUnlockObject @ r0<- success for unlock(self, obj) 888 cmp r0, #0 @ failed? 889 FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST 890 beq common_exceptionThrown @ yes, exception is pending 891 GET_INST_OPCODE(ip) @ extract opcode from rINST 892 GOTO_OPCODE(ip) @ jump to next instruction 8931: 894 FETCH_ADVANCE_INST(1) @ advance before throw 895 b common_errNullObject 896 897/* ------------------------------ */ 898 .balign 64 899.L_OP_CHECK_CAST: /* 0x1f */ 900/* File: armv5te/OP_CHECK_CAST.S */ 901 /* 902 * Check to see if a cast from one class to another is allowed. 
903 */ 904 /* check-cast vAA, class@BBBB */ 905 mov r3, rINST, lsr #8 @ r3<- AA 906 FETCH(r2, 1) @ r2<- BBBB 907 GET_VREG(r9, r3) @ r9<- object 908 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex 909 cmp r9, #0 @ is object null? 910 ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses 911 beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds 912 ldr r1, [r0, r2, lsl #2] @ r1<- resolved class 913 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 914 cmp r1, #0 @ have we resolved this before? 915 beq .LOP_CHECK_CAST_resolve @ not resolved, do it now 916.LOP_CHECK_CAST_resolved: 917 cmp r0, r1 @ same class (trivial success)? 918 bne .LOP_CHECK_CAST_fullcheck @ no, do full check 919.LOP_CHECK_CAST_okay: 920 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 921 GET_INST_OPCODE(ip) @ extract opcode from rINST 922 GOTO_OPCODE(ip) @ jump to next instruction 923 924/* ------------------------------ */ 925 .balign 64 926.L_OP_INSTANCE_OF: /* 0x20 */ 927/* File: armv5te/OP_INSTANCE_OF.S */ 928 /* 929 * Check to see if an object reference is an instance of a class. 930 * 931 * Most common situation is a non-null object, being compared against 932 * an already-resolved class. 933 */ 934 /* instance-of vA, vB, class@CCCC */ 935 mov r3, rINST, lsr #12 @ r3<- B 936 mov r9, rINST, lsr #8 @ r9<- A+ 937 GET_VREG(r0, r3) @ r0<- vB (object) 938 and r9, r9, #15 @ r9<- A 939 cmp r0, #0 @ is object null? 940 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex 941 beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0 942 FETCH(r3, 1) @ r3<- CCCC 943 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses 944 ldr r1, [r2, r3, lsl #2] @ r1<- resolved class 945 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 946 cmp r1, #0 @ have we resolved this before? 947 beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now 948.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class 949 cmp r0, r1 @ same class (trivial success)? 
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

/* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv5te/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    /* array-length vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B
    mov     r2, rINST, lsr #8           @ r2<- A+
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    and     r2, r2, #15                 @ r2<- A
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vB<- length
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB (class index)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved:   @ r0=class
    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

/* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC (class index)
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB (type index)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue        @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false (don't init class)
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB (type index)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false (don't init class)
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


/* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /* fill-array-data vAA, +BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
    EXPORT_PC();
    bl      dvmInterpHandleFillArrayData@ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ exception handler can throw
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended); sets N
    mov     r9, r9, lsl #1              @ r9<- byte offset (flags preserved)
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset (flags preserved)
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA (test value)
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2 (switch data)
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA (test value)
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2 (switch data)
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.
 We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: armv5te/OP_CMPG_FLOAT.S */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.
 We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: armv5te/OP_CMPG_DOUBLE.S */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2 (low words)
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

/* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv5te/OP_IF_EQ.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv5te/OP_IF_NE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: armv5te/OP_IF_LT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: armv5te/OP_IF_GE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: armv5te/OP_IF_GT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LE: /* 0x37 */
/* File: armv5te/OP_IF_LE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bgt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b        common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_EQZ: /* 0x38 */
/* File: armv5te/OP_IF_EQZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_NEZ: /* 0x39 */
/* File: armv5te/OP_IF_NEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LTZ: /* 0x3a */
/* File: armv5te/OP_IF_LTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GEZ: /* 0x3b */
/* File: armv5te/OP_IF_GEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
1832 * 1833 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1834 */ 1835 /* if-cmp vAA, +BBBB */ 1836 mov r0, rINST, lsr #8 @ r0<- AA 1837 GET_VREG(r2, r0) @ r2<- vAA 1838 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1839 cmp r2, #0 @ compare (vA, 0) 1840 blt 1f @ branch to 1 if comparison failed 1841 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1842 movs r9, r9, asl #1 @ convert to bytes, check sign 1843 bmi common_backwardBranch @ backward branch, do periodic checks 18441: 1845#if defined(WITH_JIT) 1846 GET_JIT_PROF_TABLE(r0) 1847 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1848 cmp r0,#0 1849 bne common_updateProfile 1850 GET_INST_OPCODE(ip) @ extract opcode from rINST 1851 GOTO_OPCODE(ip) @ jump to next instruction 1852#else 1853 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1854 GET_INST_OPCODE(ip) @ extract opcode from rINST 1855 GOTO_OPCODE(ip) @ jump to next instruction 1856#endif 1857 1858 1859/* ------------------------------ */ 1860 .balign 64 1861.L_OP_IF_GTZ: /* 0x3c */ 1862/* File: armv5te/OP_IF_GTZ.S */ 1863/* File: armv5te/zcmp.S */ 1864 /* 1865 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1866 * fragment that specifies the *reverse* comparison to perform, e.g. 1867 * for "if-le" you would use "gt". 
1868 * 1869 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1870 */ 1871 /* if-cmp vAA, +BBBB */ 1872 mov r0, rINST, lsr #8 @ r0<- AA 1873 GET_VREG(r2, r0) @ r2<- vAA 1874 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1875 cmp r2, #0 @ compare (vA, 0) 1876 ble 1f @ branch to 1 if comparison failed 1877 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1878 movs r9, r9, asl #1 @ convert to bytes, check sign 1879 bmi common_backwardBranch @ backward branch, do periodic checks 18801: 1881#if defined(WITH_JIT) 1882 GET_JIT_PROF_TABLE(r0) 1883 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1884 cmp r0,#0 1885 bne common_updateProfile 1886 GET_INST_OPCODE(ip) @ extract opcode from rINST 1887 GOTO_OPCODE(ip) @ jump to next instruction 1888#else 1889 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1890 GET_INST_OPCODE(ip) @ extract opcode from rINST 1891 GOTO_OPCODE(ip) @ jump to next instruction 1892#endif 1893 1894 1895/* ------------------------------ */ 1896 .balign 64 1897.L_OP_IF_LEZ: /* 0x3d */ 1898/* File: armv5te/OP_IF_LEZ.S */ 1899/* File: armv5te/zcmp.S */ 1900 /* 1901 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1902 * fragment that specifies the *reverse* comparison to perform, e.g. 1903 * for "if-le" you would use "gt". 
1904 * 1905 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1906 */ 1907 /* if-cmp vAA, +BBBB */ 1908 mov r0, rINST, lsr #8 @ r0<- AA 1909 GET_VREG(r2, r0) @ r2<- vAA 1910 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1911 cmp r2, #0 @ compare (vA, 0) 1912 bgt 1f @ branch to 1 if comparison failed 1913 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1914 movs r9, r9, asl #1 @ convert to bytes, check sign 1915 bmi common_backwardBranch @ backward branch, do periodic checks 19161: 1917#if defined(WITH_JIT) 1918 GET_JIT_PROF_TABLE(r0) 1919 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1920 cmp r0,#0 1921 bne common_updateProfile 1922 GET_INST_OPCODE(ip) @ extract opcode from rINST 1923 GOTO_OPCODE(ip) @ jump to next instruction 1924#else 1925 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1926 GET_INST_OPCODE(ip) @ extract opcode from rINST 1927 GOTO_OPCODE(ip) @ jump to next instruction 1928#endif 1929 1930 1931/* ------------------------------ */ 1932 .balign 64 1933.L_OP_UNUSED_3E: /* 0x3e */ 1934/* File: armv5te/OP_UNUSED_3E.S */ 1935/* File: armv5te/unused.S */ 1936 bl common_abort 1937 1938 1939/* ------------------------------ */ 1940 .balign 64 1941.L_OP_UNUSED_3F: /* 0x3f */ 1942/* File: armv5te/OP_UNUSED_3F.S */ 1943/* File: armv5te/unused.S */ 1944 bl common_abort 1945 1946 1947/* ------------------------------ */ 1948 .balign 64 1949.L_OP_UNUSED_40: /* 0x40 */ 1950/* File: armv5te/OP_UNUSED_40.S */ 1951/* File: armv5te/unused.S */ 1952 bl common_abort 1953 1954 1955/* ------------------------------ */ 1956 .balign 64 1957.L_OP_UNUSED_41: /* 0x41 */ 1958/* File: armv5te/OP_UNUSED_41.S */ 1959/* File: armv5te/unused.S */ 1960 bl common_abort 1961 1962 1963/* ------------------------------ */ 1964 .balign 64 1965.L_OP_UNUSED_42: /* 0x42 */ 1966/* File: armv5te/OP_UNUSED_42.S */ 1967/* File: armv5te/unused.S */ 1968 bl common_abort 1969 1970 1971/* ------------------------------ */ 1972 .balign 64 1973.L_OP_UNUSED_43: /* 0x43 */ 1974/* 
 File: armv5te/OP_UNUSED_43.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_AGET: /* 0x44 */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: armv5te/OP_AGET_WIDE.S */
    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .LOP_AGET_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_AGET_OBJECT: /* 0x46 */
/* File: armv5te/OP_AGET_OBJECT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BOOLEAN: /* 0x47 */
/* File: armv5te/OP_AGET_BOOLEAN.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrb    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BYTE: /* 0x48 */
/* File: armv5te/OP_AGET_BYTE.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_CHAR: /* 0x49 */
/* File: armv5te/OP_AGET_CHAR.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrh    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_SHORT: /* 0x4a */
/* File: armv5te/OP_AGET_SHORT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT: /* 0x4b */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_APUT_WIDE: /* 0x4c */
/* File: armv5te/OP_APUT_WIDE.S */
    /*
     * Array put, 64 bits.  vBB[vCC] <- vAA.
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
     */
    /* aput-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    bcc     .LOP_APUT_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_APUT_OBJECT: /* 0x4d */
/* File: armv5te/OP_APUT_OBJECT.S */
    /*
     * Store an object into an array.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r1, r2)                    @ r1<- vBB (array object)
    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
    cmp     r1, #0                      @ null array object?
    GET_VREG(r9, r9)                    @ r9<- vAA
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r1, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r10, r1, r0, lsl #2         @ r10<- arrayObj + index*width
    cmp     r0, r3                      @ compare unsigned index, length
    bcc     .LOP_APUT_OBJECT_finish     @ we're okay, continue on
    b       common_errArrayIndex        @ index >= length, bail


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BOOLEAN: /* 0x4e */
/* File: armv5te/OP_APUT_BOOLEAN.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BYTE: /* 0x4f */
/* File: armv5te/OP_APUT_BYTE.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_CHAR: /* 0x50 */
/* File: armv5te/OP_APUT_CHAR.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_SHORT: /* 0x51 */
/* File: armv5te/OP_APUT_SHORT.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET: /* 0x52 */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: armv5te/OP_IGET_WIDE.S */
    /*
     * Wide 64-bit instance field get.  (iget-wide loads a long/double.)
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_WIDE_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: armv5te/OP_IGET_OBJECT.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_OBJECT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: armv5te/OP_IGET_BOOLEAN.S */
@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BOOLEAN_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BYTE: /* 0x56 */
/* File: armv5te/OP_IGET_BYTE.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BYTE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_CHAR: /* 0x57 */
/* File: armv5te/OP_IGET_CHAR.S */
@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_CHAR_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_SHORT: /* 0x58 */
/* File: armv5te/OP_IGET_SHORT.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_SHORT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT: /* 0x59 */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_finish            @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE: /* 0x5a */
/* File: armv5te/OP_IPUT_WIDE.S */
    /* iput-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_WIDE_finish       @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT: /* 0x5b */
/* File: armv5te/OP_IPUT_OBJECT.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_OBJECT_finish     @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BOOLEAN: /* 0x5c */
/* File: armv5te/OP_IPUT_BOOLEAN.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BOOLEAN_finish    @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BYTE: /* 0x5d */
/* File: armv5te/OP_IPUT_BYTE.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BYTE_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_CHAR: /* 0x5e */
/* File: armv5te/OP_IPUT_CHAR.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_CHAR_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_SHORT: /* 0x5f */
/* File: armv5te/OP_IPUT_SHORT.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
2790 bne .LOP_IPUT_SHORT_finish @ yes, finish up 2791 b common_exceptionThrown 2792 2793 2794/* ------------------------------ */ 2795 .balign 64 2796.L_OP_SGET: /* 0x60 */ 2797/* File: armv5te/OP_SGET.S */ 2798 /* 2799 * General 32-bit SGET handler. 2800 * 2801 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2802 */ 2803 /* op vAA, field@BBBB */ 2804 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2805 FETCH(r1, 1) @ r1<- field ref BBBB 2806 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2807 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2808 cmp r0, #0 @ is resolved entry null? 2809 beq .LOP_SGET_resolve @ yes, do resolve 2810.LOP_SGET_finish: @ field ptr in r0 2811 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2812 mov r2, rINST, lsr #8 @ r2<- AA 2813 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2814 SET_VREG(r1, r2) @ fp[AA]<- r1 2815 GET_INST_OPCODE(ip) @ extract opcode from rINST 2816 GOTO_OPCODE(ip) @ jump to next instruction 2817 2818/* ------------------------------ */ 2819 .balign 64 2820.L_OP_SGET_WIDE: /* 0x61 */ 2821/* File: armv5te/OP_SGET_WIDE.S */ 2822 /* 2823 * 64-bit SGET handler. 2824 */ 2825 /* sget-wide vAA, field@BBBB */ 2826 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2827 FETCH(r1, 1) @ r1<- field ref BBBB 2828 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2829 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2830 cmp r0, #0 @ is resolved entry null? 
2831 beq .LOP_SGET_WIDE_resolve @ yes, do resolve 2832.LOP_SGET_WIDE_finish: 2833 mov r9, rINST, lsr #8 @ r9<- AA 2834 .if 0 2835 add r0, r0, #offStaticField_value @ r0<- pointer to data 2836 bl android_quasiatomic_read_64 @ r0/r1<- contents of field 2837 .else 2838 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 2839 .endif 2840 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2841 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2842 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 2843 GET_INST_OPCODE(ip) @ extract opcode from rINST 2844 GOTO_OPCODE(ip) @ jump to next instruction 2845 2846/* ------------------------------ */ 2847 .balign 64 2848.L_OP_SGET_OBJECT: /* 0x62 */ 2849/* File: armv5te/OP_SGET_OBJECT.S */ 2850/* File: armv5te/OP_SGET.S */ 2851 /* 2852 * General 32-bit SGET handler. 2853 * 2854 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2855 */ 2856 /* op vAA, field@BBBB */ 2857 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2858 FETCH(r1, 1) @ r1<- field ref BBBB 2859 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2860 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2861 cmp r0, #0 @ is resolved entry null? 2862 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve 2863.LOP_SGET_OBJECT_finish: @ field ptr in r0 2864 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2865 mov r2, rINST, lsr #8 @ r2<- AA 2866 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2867 SET_VREG(r1, r2) @ fp[AA]<- r1 2868 GET_INST_OPCODE(ip) @ extract opcode from rINST 2869 GOTO_OPCODE(ip) @ jump to next instruction 2870 2871 2872/* ------------------------------ */ 2873 .balign 64 2874.L_OP_SGET_BOOLEAN: /* 0x63 */ 2875/* File: armv5te/OP_SGET_BOOLEAN.S */ 2876/* File: armv5te/OP_SGET.S */ 2877 /* 2878 * General 32-bit SGET handler. 
2879 * 2880 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2881 */ 2882 /* op vAA, field@BBBB */ 2883 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2884 FETCH(r1, 1) @ r1<- field ref BBBB 2885 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2886 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2887 cmp r0, #0 @ is resolved entry null? 2888 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve 2889.LOP_SGET_BOOLEAN_finish: @ field ptr in r0 2890 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2891 mov r2, rINST, lsr #8 @ r2<- AA 2892 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2893 SET_VREG(r1, r2) @ fp[AA]<- r1 2894 GET_INST_OPCODE(ip) @ extract opcode from rINST 2895 GOTO_OPCODE(ip) @ jump to next instruction 2896 2897 2898/* ------------------------------ */ 2899 .balign 64 2900.L_OP_SGET_BYTE: /* 0x64 */ 2901/* File: armv5te/OP_SGET_BYTE.S */ 2902/* File: armv5te/OP_SGET.S */ 2903 /* 2904 * General 32-bit SGET handler. 2905 * 2906 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2907 */ 2908 /* op vAA, field@BBBB */ 2909 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2910 FETCH(r1, 1) @ r1<- field ref BBBB 2911 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2912 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2913 cmp r0, #0 @ is resolved entry null? 2914 beq .LOP_SGET_BYTE_resolve @ yes, do resolve 2915.LOP_SGET_BYTE_finish: @ field ptr in r0 2916 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2917 mov r2, rINST, lsr #8 @ r2<- AA 2918 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2919 SET_VREG(r1, r2) @ fp[AA]<- r1 2920 GET_INST_OPCODE(ip) @ extract opcode from rINST 2921 GOTO_OPCODE(ip) @ jump to next instruction 2922 2923 2924/* ------------------------------ */ 2925 .balign 64 2926.L_OP_SGET_CHAR: /* 0x65 */ 2927/* File: armv5te/OP_SGET_CHAR.S */ 2928/* File: armv5te/OP_SGET.S */ 2929 /* 2930 * General 32-bit SGET handler. 
2931 * 2932 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2933 */ 2934 /* op vAA, field@BBBB */ 2935 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2936 FETCH(r1, 1) @ r1<- field ref BBBB 2937 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2938 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2939 cmp r0, #0 @ is resolved entry null? 2940 beq .LOP_SGET_CHAR_resolve @ yes, do resolve 2941.LOP_SGET_CHAR_finish: @ field ptr in r0 2942 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2943 mov r2, rINST, lsr #8 @ r2<- AA 2944 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2945 SET_VREG(r1, r2) @ fp[AA]<- r1 2946 GET_INST_OPCODE(ip) @ extract opcode from rINST 2947 GOTO_OPCODE(ip) @ jump to next instruction 2948 2949 2950/* ------------------------------ */ 2951 .balign 64 2952.L_OP_SGET_SHORT: /* 0x66 */ 2953/* File: armv5te/OP_SGET_SHORT.S */ 2954/* File: armv5te/OP_SGET.S */ 2955 /* 2956 * General 32-bit SGET handler. 2957 * 2958 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2959 */ 2960 /* op vAA, field@BBBB */ 2961 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2962 FETCH(r1, 1) @ r1<- field ref BBBB 2963 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2964 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2965 cmp r0, #0 @ is resolved entry null? 2966 beq .LOP_SGET_SHORT_resolve @ yes, do resolve 2967.LOP_SGET_SHORT_finish: @ field ptr in r0 2968 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2969 mov r2, rINST, lsr #8 @ r2<- AA 2970 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2971 SET_VREG(r1, r2) @ fp[AA]<- r1 2972 GET_INST_OPCODE(ip) @ extract opcode from rINST 2973 GOTO_OPCODE(ip) @ jump to next instruction 2974 2975 2976/* ------------------------------ */ 2977 .balign 64 2978.L_OP_SPUT: /* 0x67 */ 2979/* File: armv5te/OP_SPUT.S */ 2980 /* 2981 * General 32-bit SPUT handler. 
2982 * 2983 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 2984 */ 2985 /* op vAA, field@BBBB */ 2986 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2987 FETCH(r1, 1) @ r1<- field ref BBBB 2988 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2989 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2990 cmp r0, #0 @ is resolved entry null? 2991 beq .LOP_SPUT_resolve @ yes, do resolve 2992.LOP_SPUT_finish: @ field ptr in r0 2993 mov r2, rINST, lsr #8 @ r2<- AA 2994 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2995 GET_VREG(r1, r2) @ r1<- fp[AA] 2996 GET_INST_OPCODE(ip) @ extract opcode from rINST 2997 str r1, [r0, #offStaticField_value] @ field<- vAA 2998 GOTO_OPCODE(ip) @ jump to next instruction 2999 3000/* ------------------------------ */ 3001 .balign 64 3002.L_OP_SPUT_WIDE: /* 0x68 */ 3003/* File: armv5te/OP_SPUT_WIDE.S */ 3004 /* 3005 * 64-bit SPUT handler. 3006 */ 3007 /* sput-wide vAA, field@BBBB */ 3008 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 3009 FETCH(r1, 1) @ r1<- field ref BBBB 3010 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 3011 mov r9, rINST, lsr #8 @ r9<- AA 3012 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 3013 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 3014 cmp r2, #0 @ is resolved entry null? 
3015 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve 3016.LOP_SPUT_WIDE_finish: @ field ptr in r2, AA in r9 3017 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3018 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 3019 GET_INST_OPCODE(r10) @ extract opcode from rINST 3020 .if 0 3021 add r2, r2, #offStaticField_value @ r2<- pointer to data 3022 bl android_quasiatomic_swap_64 @ stores r0/r1 into addr r2 3023 .else 3024 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 3025 .endif 3026 GOTO_OPCODE(r10) @ jump to next instruction 3027 3028/* ------------------------------ */ 3029 .balign 64 3030.L_OP_SPUT_OBJECT: /* 0x69 */ 3031/* File: armv5te/OP_SPUT_OBJECT.S */ 3032/* File: armv5te/OP_SPUT.S */ 3033 /* 3034 * General 32-bit SPUT handler. 3035 * 3036 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3037 */ 3038 /* op vAA, field@BBBB */ 3039 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3040 FETCH(r1, 1) @ r1<- field ref BBBB 3041 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3042 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3043 cmp r0, #0 @ is resolved entry null? 3044 beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve 3045.LOP_SPUT_OBJECT_finish: @ field ptr in r0 3046 mov r2, rINST, lsr #8 @ r2<- AA 3047 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3048 GET_VREG(r1, r2) @ r1<- fp[AA] 3049 GET_INST_OPCODE(ip) @ extract opcode from rINST 3050 str r1, [r0, #offStaticField_value] @ field<- vAA 3051 GOTO_OPCODE(ip) @ jump to next instruction 3052 3053 3054/* ------------------------------ */ 3055 .balign 64 3056.L_OP_SPUT_BOOLEAN: /* 0x6a */ 3057/* File: armv5te/OP_SPUT_BOOLEAN.S */ 3058/* File: armv5te/OP_SPUT.S */ 3059 /* 3060 * General 32-bit SPUT handler. 
3061 * 3062 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3063 */ 3064 /* op vAA, field@BBBB */ 3065 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3066 FETCH(r1, 1) @ r1<- field ref BBBB 3067 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3068 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3069 cmp r0, #0 @ is resolved entry null? 3070 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve 3071.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0 3072 mov r2, rINST, lsr #8 @ r2<- AA 3073 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3074 GET_VREG(r1, r2) @ r1<- fp[AA] 3075 GET_INST_OPCODE(ip) @ extract opcode from rINST 3076 str r1, [r0, #offStaticField_value] @ field<- vAA 3077 GOTO_OPCODE(ip) @ jump to next instruction 3078 3079 3080/* ------------------------------ */ 3081 .balign 64 3082.L_OP_SPUT_BYTE: /* 0x6b */ 3083/* File: armv5te/OP_SPUT_BYTE.S */ 3084/* File: armv5te/OP_SPUT.S */ 3085 /* 3086 * General 32-bit SPUT handler. 3087 * 3088 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3089 */ 3090 /* op vAA, field@BBBB */ 3091 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3092 FETCH(r1, 1) @ r1<- field ref BBBB 3093 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3094 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3095 cmp r0, #0 @ is resolved entry null? 3096 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve 3097.LOP_SPUT_BYTE_finish: @ field ptr in r0 3098 mov r2, rINST, lsr #8 @ r2<- AA 3099 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3100 GET_VREG(r1, r2) @ r1<- fp[AA] 3101 GET_INST_OPCODE(ip) @ extract opcode from rINST 3102 str r1, [r0, #offStaticField_value] @ field<- vAA 3103 GOTO_OPCODE(ip) @ jump to next instruction 3104 3105 3106/* ------------------------------ */ 3107 .balign 64 3108.L_OP_SPUT_CHAR: /* 0x6c */ 3109/* File: armv5te/OP_SPUT_CHAR.S */ 3110/* File: armv5te/OP_SPUT.S */ 3111 /* 3112 * General 32-bit SPUT handler. 
3113 * 3114 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3115 */ 3116 /* op vAA, field@BBBB */ 3117 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3118 FETCH(r1, 1) @ r1<- field ref BBBB 3119 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3120 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3121 cmp r0, #0 @ is resolved entry null? 3122 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve 3123.LOP_SPUT_CHAR_finish: @ field ptr in r0 3124 mov r2, rINST, lsr #8 @ r2<- AA 3125 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3126 GET_VREG(r1, r2) @ r1<- fp[AA] 3127 GET_INST_OPCODE(ip) @ extract opcode from rINST 3128 str r1, [r0, #offStaticField_value] @ field<- vAA 3129 GOTO_OPCODE(ip) @ jump to next instruction 3130 3131 3132/* ------------------------------ */ 3133 .balign 64 3134.L_OP_SPUT_SHORT: /* 0x6d */ 3135/* File: armv5te/OP_SPUT_SHORT.S */ 3136/* File: armv5te/OP_SPUT.S */ 3137 /* 3138 * General 32-bit SPUT handler. 3139 * 3140 * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short 3141 */ 3142 /* op vAA, field@BBBB */ 3143 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3144 FETCH(r1, 1) @ r1<- field ref BBBB 3145 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3146 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3147 cmp r0, #0 @ is resolved entry null? 3148 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve 3149.LOP_SPUT_SHORT_finish: @ field ptr in r0 3150 mov r2, rINST, lsr #8 @ r2<- AA 3151 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3152 GET_VREG(r1, r2) @ r1<- fp[AA] 3153 GET_INST_OPCODE(ip) @ extract opcode from rINST 3154 str r1, [r0, #offStaticField_value] @ field<- vAA 3155 GOTO_OPCODE(ip) @ jump to next instruction 3156 3157 3158/* ------------------------------ */ 3159 .balign 64 3160.L_OP_INVOKE_VIRTUAL: /* 0x6e */ 3161/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3162 /* 3163 * Handle a virtual method call. 
3164 * 3165 * for: invoke-virtual, invoke-virtual/range 3166 */ 3167 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3168 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3169 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3170 FETCH(r1, 1) @ r1<- BBBB 3171 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3172 FETCH(r10, 2) @ r10<- GFED or CCCC 3173 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3174 .if (!0) 3175 and r10, r10, #15 @ r10<- D (or stays CCCC) 3176 .endif 3177 cmp r0, #0 @ already resolved? 3178 EXPORT_PC() @ must export for invoke 3179 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on 3180 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3181 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3182 mov r2, #METHOD_VIRTUAL @ resolver method type 3183 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3184 cmp r0, #0 @ got null? 3185 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue 3186 b common_exceptionThrown @ yes, handle exception 3187 3188/* ------------------------------ */ 3189 .balign 64 3190.L_OP_INVOKE_SUPER: /* 0x6f */ 3191/* File: armv5te/OP_INVOKE_SUPER.S */ 3192 /* 3193 * Handle a "super" method call. 3194 * 3195 * for: invoke-super, invoke-super/range 3196 */ 3197 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3198 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3199 FETCH(r10, 2) @ r10<- GFED or CCCC 3200 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3201 .if (!0) 3202 and r10, r10, #15 @ r10<- D (or stays CCCC) 3203 .endif 3204 FETCH(r1, 1) @ r1<- BBBB 3205 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3206 GET_VREG(r2, r10) @ r2<- "this" ptr 3207 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3208 cmp r2, #0 @ null "this"? 3209 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3210 beq common_errNullObject @ null "this", throw exception 3211 cmp r0, #0 @ already resolved? 
3212 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3213 EXPORT_PC() @ must export for invoke 3214 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on 3215 b .LOP_INVOKE_SUPER_resolve @ do resolve now 3216 3217/* ------------------------------ */ 3218 .balign 64 3219.L_OP_INVOKE_DIRECT: /* 0x70 */ 3220/* File: armv5te/OP_INVOKE_DIRECT.S */ 3221 /* 3222 * Handle a direct method call. 3223 * 3224 * (We could defer the "is 'this' pointer null" test to the common 3225 * method invocation code, and use a flag to indicate that static 3226 * calls don't count. If we do this as part of copying the arguments 3227 * out we could avoiding loading the first arg twice.) 3228 * 3229 * for: invoke-direct, invoke-direct/range 3230 */ 3231 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3232 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3233 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3234 FETCH(r1, 1) @ r1<- BBBB 3235 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3236 FETCH(r10, 2) @ r10<- GFED or CCCC 3237 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3238 .if (!0) 3239 and r10, r10, #15 @ r10<- D (or stays CCCC) 3240 .endif 3241 cmp r0, #0 @ already resolved? 3242 EXPORT_PC() @ must export for invoke 3243 GET_VREG(r2, r10) @ r2<- "this" ptr 3244 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now 3245.LOP_INVOKE_DIRECT_finish: 3246 cmp r2, #0 @ null "this" ref? 3247 bne common_invokeMethodNoRange @ no, continue on 3248 b common_errNullObject @ yes, throw exception 3249 3250/* ------------------------------ */ 3251 .balign 64 3252.L_OP_INVOKE_STATIC: /* 0x71 */ 3253/* File: armv5te/OP_INVOKE_STATIC.S */ 3254 /* 3255 * Handle a static method call. 
3256 * 3257 * for: invoke-static, invoke-static/range 3258 */ 3259 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3260 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3261 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3262 FETCH(r1, 1) @ r1<- BBBB 3263 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3264 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3265 cmp r0, #0 @ already resolved? 3266 EXPORT_PC() @ must export for invoke 3267 bne common_invokeMethodNoRange @ yes, continue on 32680: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3269 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3270 mov r2, #METHOD_STATIC @ resolver method type 3271 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3272 cmp r0, #0 @ got null? 3273 bne common_invokeMethodNoRange @ no, continue 3274 b common_exceptionThrown @ yes, handle exception 3275 3276/* ------------------------------ */ 3277 .balign 64 3278.L_OP_INVOKE_INTERFACE: /* 0x72 */ 3279/* File: armv5te/OP_INVOKE_INTERFACE.S */ 3280 /* 3281 * Handle an interface method call. 3282 * 3283 * for: invoke-interface, invoke-interface/range 3284 */ 3285 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3286 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3287 FETCH(r2, 2) @ r2<- FEDC or CCCC 3288 FETCH(r1, 1) @ r1<- BBBB 3289 .if (!0) 3290 and r2, r2, #15 @ r2<- C (or stays CCCC) 3291 .endif 3292 EXPORT_PC() @ must export for invoke 3293 GET_VREG(r0, r2) @ r0<- first arg ("this") 3294 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3295 cmp r0, #0 @ null obj? 3296 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3297 beq common_errNullObject @ yes, fail 3298 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3299 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3300 cmp r0, #0 @ failed? 
3301 beq common_exceptionThrown @ yes, handle exception 3302 b common_invokeMethodNoRange @ jump to common handler 3303 3304/* ------------------------------ */ 3305 .balign 64 3306.L_OP_UNUSED_73: /* 0x73 */ 3307/* File: armv5te/OP_UNUSED_73.S */ 3308/* File: armv5te/unused.S */ 3309 bl common_abort 3310 3311 3312/* ------------------------------ */ 3313 .balign 64 3314.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */ 3315/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */ 3316/* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3317 /* 3318 * Handle a virtual method call. 3319 * 3320 * for: invoke-virtual, invoke-virtual/range 3321 */ 3322 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3323 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3324 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3325 FETCH(r1, 1) @ r1<- BBBB 3326 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3327 FETCH(r10, 2) @ r10<- GFED or CCCC 3328 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3329 .if (!1) 3330 and r10, r10, #15 @ r10<- D (or stays CCCC) 3331 .endif 3332 cmp r0, #0 @ already resolved? 3333 EXPORT_PC() @ must export for invoke 3334 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on 3335 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3336 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3337 mov r2, #METHOD_VIRTUAL @ resolver method type 3338 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3339 cmp r0, #0 @ got null? 3340 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue 3341 b common_exceptionThrown @ yes, handle exception 3342 3343 3344/* ------------------------------ */ 3345 .balign 64 3346.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */ 3347/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */ 3348/* File: armv5te/OP_INVOKE_SUPER.S */ 3349 /* 3350 * Handle a "super" method call. 
3351 * 3352 * for: invoke-super, invoke-super/range 3353 */ 3354 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3355 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3356 FETCH(r10, 2) @ r10<- GFED or CCCC 3357 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3358 .if (!1) 3359 and r10, r10, #15 @ r10<- D (or stays CCCC) 3360 .endif 3361 FETCH(r1, 1) @ r1<- BBBB 3362 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3363 GET_VREG(r2, r10) @ r2<- "this" ptr 3364 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3365 cmp r2, #0 @ null "this"? 3366 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3367 beq common_errNullObject @ null "this", throw exception 3368 cmp r0, #0 @ already resolved? 3369 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3370 EXPORT_PC() @ must export for invoke 3371 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on 3372 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now 3373 3374 3375/* ------------------------------ */ 3376 .balign 64 3377.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */ 3378/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */ 3379/* File: armv5te/OP_INVOKE_DIRECT.S */ 3380 /* 3381 * Handle a direct method call. 3382 * 3383 * (We could defer the "is 'this' pointer null" test to the common 3384 * method invocation code, and use a flag to indicate that static 3385 * calls don't count. If we do this as part of copying the arguments 3386 * out we could avoiding loading the first arg twice.) 
3387 * 3388 * for: invoke-direct, invoke-direct/range 3389 */ 3390 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3391 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3392 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3393 FETCH(r1, 1) @ r1<- BBBB 3394 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3395 FETCH(r10, 2) @ r10<- GFED or CCCC 3396 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3397 .if (!1) 3398 and r10, r10, #15 @ r10<- D (or stays CCCC) 3399 .endif 3400 cmp r0, #0 @ already resolved? 3401 EXPORT_PC() @ must export for invoke 3402 GET_VREG(r2, r10) @ r2<- "this" ptr 3403 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now 3404.LOP_INVOKE_DIRECT_RANGE_finish: 3405 cmp r2, #0 @ null "this" ref? 3406 bne common_invokeMethodRange @ no, continue on 3407 b common_errNullObject @ yes, throw exception 3408 3409 3410/* ------------------------------ */ 3411 .balign 64 3412.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */ 3413/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */ 3414/* File: armv5te/OP_INVOKE_STATIC.S */ 3415 /* 3416 * Handle a static method call. 3417 * 3418 * for: invoke-static, invoke-static/range 3419 */ 3420 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3421 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3422 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3423 FETCH(r1, 1) @ r1<- BBBB 3424 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3425 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3426 cmp r0, #0 @ already resolved? 3427 EXPORT_PC() @ must export for invoke 3428 bne common_invokeMethodRange @ yes, continue on 34290: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3430 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3431 mov r2, #METHOD_STATIC @ resolver method type 3432 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3433 cmp r0, #0 @ got null? 
/*
 * NOTE(review): span below is part of the auto-generated mterp interpreter
 * (see "DO NOT EDIT" in the file header).  Each .L_OP_* handler is padded
 * into a 64-byte slot (.balign 64) so dispatch can compute a handler
 * address from rIBASE plus a shifted opcode -- presumably opcode << 6;
 * confirm against the code that initializes rIBASE.  Keep every handler's
 * live code within its 64-byte slot.
 */
    bne     common_invokeMethodRange @ no, continue
    b       common_exceptionThrown  @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!1)                        @ range form: keep full 16-bit CCCC, mask compiled out
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodRange    @ jump to common handler


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_79: /* 0x79 */
/* File: armv5te/OP_UNUSED_79.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_7A: /* 0x7a */
/* File: armv5te/OP_UNUSED_7A.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_NEG_INT: /* 0x7b */
/* File: armv5te/OP_NEG_INT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsb     r0, r0, #0                  @ r0<- op, r0-r3 changed (vA = -vB)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NOT_INT: /* 0x7c */
/* File: armv5te/OP_NOT_INT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ r0<- op, r0-r3 changed (vA = ~vB)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_LONG: /* 0x7d */
/* File: armv5te/OP_NEG_LONG.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsbs    r0, r0, #0                  @ optional op; may set condition codes
    rsc     r1, r1, #0                  @ r0/r1<- op, r2-r3 changed (64-bit negate w/ borrow)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NOT_LONG: /* 0x7e */
/* File: armv5te/OP_NOT_LONG.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ optional op; may set condition codes
    mvn     r1, r1                      @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_FLOAT: /* 0x7f */
/* File: armv5te/OP_NEG_FLOAT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    add     r0, r0, #0x80000000         @ r0<- op, r0-r3 changed (flip IEEE sign bit)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_DOUBLE: /* 0x80 */
/* File: armv5te/OP_NEG_DOUBLE.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    add     r1, r1, #0x80000000         @ r0/r1<- op, r2-r3 changed (sign bit is in high word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_LONG: /* 0x81 */
/* File: armv5te/OP_INT_TO_LONG.S */
/* File: armv5te/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r0, asr #31             @ r0<- op, r0-r3 changed (sign-extend into high word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_FLOAT: /* 0x82 */
/* File: armv5te/OP_INT_TO_FLOAT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      __aeabi_i2f                 @ r0<- op, r0-r3 changed (EABI soft-float helper)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_DOUBLE: /* 0x83 */
/* File: armv5te/OP_INT_TO_DOUBLE.S */
/* File: armv5te/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      __aeabi_i2d                 @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_LONG_TO_INT: /* 0x84 */
/* File: armv5te/OP_LONG_TO_INT.S */
/* we ignore the high word, making this equivalent to a 32-bit reg move */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction
    

/* ------------------------------ */
    .balign 64
.L_OP_LONG_TO_FLOAT: /* 0x85 */
/* File: armv5te/OP_LONG_TO_FLOAT.S */
/* File: armv5te/unopNarrower.S */
    /*
     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0/r1", where
     * "result" is a 32-bit quantity in r0.
     *
     * For: long-to-float, double-to-int, double-to-float
     *
     * (This would work for long-to-int, but that instruction is actually
     * an exact match for OP_MOVE.)
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    and     r9, r9, #15
    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      __aeabi_l2f                 @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_LONG_TO_DOUBLE: /* 0x86 */
/* File: armv5te/OP_LONG_TO_DOUBLE.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      __aeabi_l2d                 @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_FLOAT_TO_INT: /* 0x87 */
/* File: armv5te/OP_FLOAT_TO_INT.S */
/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      __aeabi_f2iz                @ r0<- op, r0-r3 changed (truncating convert)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/*
 * Disabled reference version: explicit min/max/NaN clamping used when the
 * platform's __aeabi_f2iz does not already provide Java-style saturation
 * (see the "EABI appears to have Java-style conversions" note above).
 */
#if 0
@include "armv5te/unop.S" {"instr":"bl f2i_doconv"}
@break
/*
 * Convert the float in r0 to an int in r0.
 *
 * We have to clip values to int min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
f2i_doconv:
    stmfd   sp!, {r4, lr}
    mov     r1, #0x4f000000             @ (float)maxint
    mov     r4, r0
    bl      __aeabi_fcmpge              @ is arg >= maxint?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0x80000000             @ return maxint (7fffffff)
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, #0xcf000000             @ (float)minint
    bl      __aeabi_fcmple              @ is arg <= minint?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0x80000000             @ return minint (80000000)
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, r4
    bl      __aeabi_fcmpeq              @ is arg == self?
    cmp     r0, #0                      @ zero == no
    ldmeqfd sp!, {r4, pc}               @ return zero for NaN

    mov     r0, r4                      @ recover arg
    bl      __aeabi_f2iz                @ convert float to int
    ldmfd   sp!, {r4, pc}
#endif

/* ------------------------------ */
    .balign 64
.L_OP_FLOAT_TO_LONG: /* 0x88 */
/* File: armv5te/OP_FLOAT_TO_LONG.S */
@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"}
/* File: armv5te/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      f2l_doconv                  @ r0<- op, r0-r3 changed
                                        @ (local wrapper used instead of __aeabi_f2lz;
                                        @  defined elsewhere in this file)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
/* File: armv5te/OP_FLOAT_TO_DOUBLE.S */
/* File: armv5te/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      __aeabi_f2d                 @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_INT: /* 0x8a */
/* File: armv5te/OP_DOUBLE_TO_INT.S */
/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
/* File: armv5te/unopNarrower.S */
    /*
     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0/r1", where
     * "result" is a 32-bit quantity in r0.
     *
     * For: long-to-float, double-to-int, double-to-float
     *
     * (This would work for long-to-int, but that instruction is actually
     * an exact match for OP_MOVE.)
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    and     r9, r9, #15
    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      __aeabi_d2iz                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/*
 * Disabled reference version with explicit clamping, kept for platforms
 * whose __aeabi_d2iz lacks Java-style saturation (see note above).
 */
#if 0
@include "armv5te/unopNarrower.S" {"instr":"bl d2i_doconv"}
@break
/*
 * Convert the double in r0/r1 to an int in r0.
 *
 * We have to clip values to int min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
d2i_doconv:
    stmfd   sp!, {r4, r5, lr}           @ save regs
    mov     r2, #0x80000000             @ maxint, as a double (low word)
    mov     r2, r2, asr #9              @ 0xffc00000
    sub     sp, sp, #4                  @ align for EABI
    mvn     r3, #0xbe000000             @ maxint, as a double (high word)
    sub     r3, r3, #0x00200000         @ 0x41dfffff
    mov     r4, r0                      @ save a copy of r0
    mov     r5, r1                      @  and r1
    bl      __aeabi_dcmpge              @ is arg >= maxint?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0x80000000             @ return maxint (0x7fffffff)
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    mov     r3, #0xc1000000             @ minint, as a double (high word)
    add     r3, r3, #0x00e00000         @ 0xc1e00000
    mov     r2, #0                      @ minint, as a double (low word)
    bl      __aeabi_dcmple              @ is arg <= minint?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0x80000000             @ return minint (80000000)
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    mov     r2, r4                      @ compare against self
    mov     r3, r5
    bl      __aeabi_dcmpeq              @ is arg == self?
    cmp     r0, #0                      @ zero == no
    beq     1f                          @ return zero for NaN

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    bl      __aeabi_d2iz                @ convert double to int

1:
    add     sp, sp, #4
    ldmfd   sp!, {r4, r5, pc}
#endif

/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_LONG: /* 0x8b */
/* File: armv5te/OP_DOUBLE_TO_LONG.S */
@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"}
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      d2l_doconv                  @ r0/r1<- op, r2-r3 changed
                                        @ (local wrapper used instead of __aeabi_d2lz;
                                        @  defined elsewhere in this file)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
/* File: armv5te/OP_DOUBLE_TO_FLOAT.S */
/* File: armv5te/unopNarrower.S */
    /*
     * Generic 64bit-to-32bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0/r1", where
     * "result" is a 32-bit quantity in r0.
     *
     * For: long-to-float, double-to-int, double-to-float
     *
     * (This would work for long-to-int, but that instruction is actually
     * an exact match for OP_MOVE.)
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    and     r9, r9, #15
    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      __aeabi_d2f                 @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_BYTE: /* 0x8d */
/* File: armv5te/OP_INT_TO_BYTE.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    mov     r0, r0, asl #24             @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r0, r0, asr #24             @ r0<- op, r0-r3 changed (sign-extend low byte)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_CHAR: /* 0x8e */
/* File: armv5te/OP_INT_TO_CHAR.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    mov     r0, r0, asl #16             @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r0, r0, lsr #16             @ r0<- op, r0-r3 changed (zero-extend low halfword)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_SHORT: /* 0x8f */
/* File: armv5te/OP_INT_TO_SHORT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
    mov     r0, r0, asl #16             @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r0, r0, asr #16             @ r0<- op, r0-r3 changed (sign-extend low halfword)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT: /* 0x90 */
/* File: armv5te/OP_ADD_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0: divide-by-zero check compiled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT: /* 0x91 */
/* File: armv5te/OP_SUB_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0: divide-by-zero check compiled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT: /* 0x92 */
/* File: armv5te/OP_MUL_INT.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0: divide-by-zero check compiled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed (Rd != Rm per ARMv5 mul rule)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT: /* 0x93 */
/* File: armv5te/OP_DIV_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 1                               @ chkzero==1: throw before calling the divide helper
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT: /* 0x94 */
/* File: armv5te/OP_REM_INT.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 1                               @ chkzero==1: throw before calling the divide helper
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed (remainder is in r1)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT: /* 0x95 */
/* File: armv5te/OP_AND_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0: divide-by-zero check compiled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT: /* 0x96 */
/* File: armv5te/OP_OR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0: divide-by-zero check compiled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT: /* 0x97 */
/* File: armv5te/OP_XOR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0: divide-by-zero check compiled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT: /* 0x98 */
/* File: armv5te/OP_SHL_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero==0: divide-by-zero check compiled out
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ Dalvik masks shift distance to 5 bits
    mov     r0, r0, asl r1              @ r0<- r0 << r1, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT: /* 0x99 */
/* File: armv5te/OP_SHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ Dalvik masks shift distance to 5 bits
    mov     r0, r0, asr r1              @ r0<- r0 >> r1 (arithmetic), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT: /* 0x9a */
/* File: armv5te/OP_USHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ Dalvik masks shift distance to 5 bits
    mov     r0, r0, lsr r1              @ r0<- r0 >>> r1 (logical), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_LONG: /* 0x9b */
/* File: armv5te/OP_ADD_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    adds    r0, r0, r2                  @ low word add; sets carry for the high word
    adc     r1, r1, r3                  @ high word add with carry; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG: /* 0x9c */
/* File: armv5te/OP_SUB_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ low word subtract; sets borrow for high word
    sbc     r1, r1, r3                  @ high word subtract with borrow; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG: /* 0x9d */
/* File: armv5te/OP_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
     */
    /* mul-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST, lsr #8           @ r0<- AA
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    add     r0, rFP, r0, lsl #2         @ r0<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_MUL_LONG_finish        @ store r9/r10, continue in out-of-line code

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG: /* 0x9e */
/* File: armv5te/OP_DIV_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.
     * Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ EABI: quotient in r0/r1, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1 (the quotient)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG: /* 0x9f */
/* File: armv5te/OP_REM_LONG.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ remainder comes back in r2/r3
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vAA/vAA+1<- r2/r3 (the remainder)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG: /* 0xa0 */
/* File: armv5te/OP_AND_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ low word AND
    and     r1, r1, r3                  @ high word AND; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG: /* 0xa1 */
/* File: armv5te/OP_OR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ low word OR
    orr     r1, r1, r3                  @ high word OR; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG: /* 0xa2 */
/* File: armv5te/OP_XOR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ low word XOR
    eor     r1, r1, r3                  @ high word XOR; r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG: /* 0xa3 */
/* File: armv5te/OP_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish        @ low word shift + store done out of line

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG: /* 0xa4 */
/* File: armv5te/OP_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2 (low word: logical shift)
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHR_LONG_finish        @ high word shift + store done out of line

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG: /* 0xa5 */
/* File: armv5te/OP_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* ushr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2 (low word: logical shift)
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_USHR_LONG_finish       @ high word shift + store done out of line

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT: /* 0xa6 */
/* File: armv5te/OP_ADD_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      __aeabi_fadd                @ r0<- r0 + r1 (EABI soft-float), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT: /* 0xa7 */
/* File: armv5te/OP_SUB_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      __aeabi_fsub                @ r0<- r0 - r1 (EABI soft-float), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT: /* 0xa8 */
/* File: armv5te/OP_MUL_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      __aeabi_fmul                @ r0<- r0 * r1 (EABI soft-float), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT: /* 0xa9 */
/* File: armv5te/OP_DIV_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      __aeabi_fdiv                @ r0<- r0 / r1 (EABI soft-float), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT: /* 0xaa */
/* File: armv5te/OP_REM_FLOAT.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
                               @ optional op; may set condition codes
    bl      fmodf                       @ r0<- fmodf(r0, r1) from libm, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE: /* 0xab */
/* File: armv5te/OP_ADD_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dadd                @ r0/r1<- r0/r1 + r2/r3 (soft double), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE: /* 0xac */
/* File: armv5te/OP_SUB_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dsub                @ r0/r1<- r0/r1 - r2/r3 (soft double), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE: /* 0xad */
/* File: armv5te/OP_MUL_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dmul                @ r0/r1<- r0/r1 * r2/r3 (soft double), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE: /* 0xae */
/* File: armv5te/OP_DIV_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
5395 */ 5396 /* binop vAA, vBB, vCC */ 5397 FETCH(r0, 1) @ r0<- CCBB 5398 mov r9, rINST, lsr #8 @ r9<- AA 5399 and r2, r0, #255 @ r2<- BB 5400 mov r3, r0, lsr #8 @ r3<- CC 5401 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5402 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5403 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5404 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5405 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5406 .if 0 5407 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5408 beq common_errDivideByZero 5409 .endif 5410 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5411 5412 @ optional op; may set condition codes 5413 bl __aeabi_ddiv @ result<- op, r0-r3 changed 5414 GET_INST_OPCODE(ip) @ extract opcode from rINST 5415 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5416 GOTO_OPCODE(ip) @ jump to next instruction 5417 /* 14-17 instructions */ 5418 5419 5420/* ------------------------------ */ 5421 .balign 64 5422.L_OP_REM_DOUBLE: /* 0xaf */ 5423/* File: armv5te/OP_REM_DOUBLE.S */ 5424/* EABI doesn't define a double remainder function, but libm does */ 5425/* File: armv5te/binopWide.S */ 5426 /* 5427 * Generic 64-bit binary operation. Provide an "instr" line that 5428 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5429 * This could be an ARM instruction or a function call. (If the result 5430 * comes back in a register other than r0, you can override "result".) 5431 * 5432 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5433 * vCC (r1). Useful for integer division and modulus. 5434 * 5435 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5436 * xor-long, add-double, sub-double, mul-double, div-double, 5437 * rem-double 5438 * 5439 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
5440 */ 5441 /* binop vAA, vBB, vCC */ 5442 FETCH(r0, 1) @ r0<- CCBB 5443 mov r9, rINST, lsr #8 @ r9<- AA 5444 and r2, r0, #255 @ r2<- BB 5445 mov r3, r0, lsr #8 @ r3<- CC 5446 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5447 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5448 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5449 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5450 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5451 .if 0 5452 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5453 beq common_errDivideByZero 5454 .endif 5455 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5456 5457 @ optional op; may set condition codes 5458 bl fmod @ result<- op, r0-r3 changed 5459 GET_INST_OPCODE(ip) @ extract opcode from rINST 5460 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5461 GOTO_OPCODE(ip) @ jump to next instruction 5462 /* 14-17 instructions */ 5463 5464 5465/* ------------------------------ */ 5466 .balign 64 5467.L_OP_ADD_INT_2ADDR: /* 0xb0 */ 5468/* File: armv5te/OP_ADD_INT_2ADDR.S */ 5469/* File: armv5te/binop2addr.S */ 5470 /* 5471 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5472 * that specifies an instruction that performs "result = r0 op r1". 5473 * This could be an ARM instruction or a function call. (If the result 5474 * comes back in a register other than r0, you can override "result".) 5475 * 5476 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5477 * vCC (r1). Useful for integer division and modulus. 5478 * 5479 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5480 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5481 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5482 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5483 */ 5484 /* binop/2addr vA, vB */ 5485 mov r9, rINST, lsr #8 @ r9<- A+ 5486 mov r3, rINST, lsr #12 @ r3<- B 5487 and r9, r9, #15 5488 GET_VREG(r1, r3) @ r1<- vB 5489 GET_VREG(r0, r9) @ r0<- vA 5490 .if 0 5491 cmp r1, #0 @ is second operand zero? 
5492 beq common_errDivideByZero 5493 .endif 5494 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5495 5496 @ optional op; may set condition codes 5497 add r0, r0, r1 @ r0<- op, r0-r3 changed 5498 GET_INST_OPCODE(ip) @ extract opcode from rINST 5499 SET_VREG(r0, r9) @ vAA<- r0 5500 GOTO_OPCODE(ip) @ jump to next instruction 5501 /* 10-13 instructions */ 5502 5503 5504/* ------------------------------ */ 5505 .balign 64 5506.L_OP_SUB_INT_2ADDR: /* 0xb1 */ 5507/* File: armv5te/OP_SUB_INT_2ADDR.S */ 5508/* File: armv5te/binop2addr.S */ 5509 /* 5510 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5511 * that specifies an instruction that performs "result = r0 op r1". 5512 * This could be an ARM instruction or a function call. (If the result 5513 * comes back in a register other than r0, you can override "result".) 5514 * 5515 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5516 * vCC (r1). Useful for integer division and modulus. 5517 * 5518 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5519 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5520 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5521 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5522 */ 5523 /* binop/2addr vA, vB */ 5524 mov r9, rINST, lsr #8 @ r9<- A+ 5525 mov r3, rINST, lsr #12 @ r3<- B 5526 and r9, r9, #15 5527 GET_VREG(r1, r3) @ r1<- vB 5528 GET_VREG(r0, r9) @ r0<- vA 5529 .if 0 5530 cmp r1, #0 @ is second operand zero? 
5531 beq common_errDivideByZero 5532 .endif 5533 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5534 5535 @ optional op; may set condition codes 5536 sub r0, r0, r1 @ r0<- op, r0-r3 changed 5537 GET_INST_OPCODE(ip) @ extract opcode from rINST 5538 SET_VREG(r0, r9) @ vAA<- r0 5539 GOTO_OPCODE(ip) @ jump to next instruction 5540 /* 10-13 instructions */ 5541 5542 5543/* ------------------------------ */ 5544 .balign 64 5545.L_OP_MUL_INT_2ADDR: /* 0xb2 */ 5546/* File: armv5te/OP_MUL_INT_2ADDR.S */ 5547/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 5548/* File: armv5te/binop2addr.S */ 5549 /* 5550 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5551 * that specifies an instruction that performs "result = r0 op r1". 5552 * This could be an ARM instruction or a function call. (If the result 5553 * comes back in a register other than r0, you can override "result".) 5554 * 5555 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5556 * vCC (r1). Useful for integer division and modulus. 5557 * 5558 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5559 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5560 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5561 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5562 */ 5563 /* binop/2addr vA, vB */ 5564 mov r9, rINST, lsr #8 @ r9<- A+ 5565 mov r3, rINST, lsr #12 @ r3<- B 5566 and r9, r9, #15 5567 GET_VREG(r1, r3) @ r1<- vB 5568 GET_VREG(r0, r9) @ r0<- vA 5569 .if 0 5570 cmp r1, #0 @ is second operand zero? 
5571 beq common_errDivideByZero 5572 .endif 5573 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5574 5575 @ optional op; may set condition codes 5576 mul r0, r1, r0 @ r0<- op, r0-r3 changed 5577 GET_INST_OPCODE(ip) @ extract opcode from rINST 5578 SET_VREG(r0, r9) @ vAA<- r0 5579 GOTO_OPCODE(ip) @ jump to next instruction 5580 /* 10-13 instructions */ 5581 5582 5583/* ------------------------------ */ 5584 .balign 64 5585.L_OP_DIV_INT_2ADDR: /* 0xb3 */ 5586/* File: armv5te/OP_DIV_INT_2ADDR.S */ 5587/* File: armv5te/binop2addr.S */ 5588 /* 5589 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5590 * that specifies an instruction that performs "result = r0 op r1". 5591 * This could be an ARM instruction or a function call. (If the result 5592 * comes back in a register other than r0, you can override "result".) 5593 * 5594 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5595 * vCC (r1). Useful for integer division and modulus. 5596 * 5597 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5598 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5599 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5600 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5601 */ 5602 /* binop/2addr vA, vB */ 5603 mov r9, rINST, lsr #8 @ r9<- A+ 5604 mov r3, rINST, lsr #12 @ r3<- B 5605 and r9, r9, #15 5606 GET_VREG(r1, r3) @ r1<- vB 5607 GET_VREG(r0, r9) @ r0<- vA 5608 .if 1 5609 cmp r1, #0 @ is second operand zero? 
5610 beq common_errDivideByZero 5611 .endif 5612 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5613 5614 @ optional op; may set condition codes 5615 bl __aeabi_idiv @ r0<- op, r0-r3 changed 5616 GET_INST_OPCODE(ip) @ extract opcode from rINST 5617 SET_VREG(r0, r9) @ vAA<- r0 5618 GOTO_OPCODE(ip) @ jump to next instruction 5619 /* 10-13 instructions */ 5620 5621 5622/* ------------------------------ */ 5623 .balign 64 5624.L_OP_REM_INT_2ADDR: /* 0xb4 */ 5625/* File: armv5te/OP_REM_INT_2ADDR.S */ 5626/* idivmod returns quotient in r0 and remainder in r1 */ 5627/* File: armv5te/binop2addr.S */ 5628 /* 5629 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5630 * that specifies an instruction that performs "result = r0 op r1". 5631 * This could be an ARM instruction or a function call. (If the result 5632 * comes back in a register other than r0, you can override "result".) 5633 * 5634 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5635 * vCC (r1). Useful for integer division and modulus. 5636 * 5637 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5638 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5639 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5640 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5641 */ 5642 /* binop/2addr vA, vB */ 5643 mov r9, rINST, lsr #8 @ r9<- A+ 5644 mov r3, rINST, lsr #12 @ r3<- B 5645 and r9, r9, #15 5646 GET_VREG(r1, r3) @ r1<- vB 5647 GET_VREG(r0, r9) @ r0<- vA 5648 .if 1 5649 cmp r1, #0 @ is second operand zero? 
5650 beq common_errDivideByZero 5651 .endif 5652 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5653 5654 @ optional op; may set condition codes 5655 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 5656 GET_INST_OPCODE(ip) @ extract opcode from rINST 5657 SET_VREG(r1, r9) @ vAA<- r1 5658 GOTO_OPCODE(ip) @ jump to next instruction 5659 /* 10-13 instructions */ 5660 5661 5662/* ------------------------------ */ 5663 .balign 64 5664.L_OP_AND_INT_2ADDR: /* 0xb5 */ 5665/* File: armv5te/OP_AND_INT_2ADDR.S */ 5666/* File: armv5te/binop2addr.S */ 5667 /* 5668 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5669 * that specifies an instruction that performs "result = r0 op r1". 5670 * This could be an ARM instruction or a function call. (If the result 5671 * comes back in a register other than r0, you can override "result".) 5672 * 5673 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5674 * vCC (r1). Useful for integer division and modulus. 5675 * 5676 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5677 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5678 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5679 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5680 */ 5681 /* binop/2addr vA, vB */ 5682 mov r9, rINST, lsr #8 @ r9<- A+ 5683 mov r3, rINST, lsr #12 @ r3<- B 5684 and r9, r9, #15 5685 GET_VREG(r1, r3) @ r1<- vB 5686 GET_VREG(r0, r9) @ r0<- vA 5687 .if 0 5688 cmp r1, #0 @ is second operand zero? 
5689 beq common_errDivideByZero 5690 .endif 5691 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5692 5693 @ optional op; may set condition codes 5694 and r0, r0, r1 @ r0<- op, r0-r3 changed 5695 GET_INST_OPCODE(ip) @ extract opcode from rINST 5696 SET_VREG(r0, r9) @ vAA<- r0 5697 GOTO_OPCODE(ip) @ jump to next instruction 5698 /* 10-13 instructions */ 5699 5700 5701/* ------------------------------ */ 5702 .balign 64 5703.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5704/* File: armv5te/OP_OR_INT_2ADDR.S */ 5705/* File: armv5te/binop2addr.S */ 5706 /* 5707 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5708 * that specifies an instruction that performs "result = r0 op r1". 5709 * This could be an ARM instruction or a function call. (If the result 5710 * comes back in a register other than r0, you can override "result".) 5711 * 5712 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5713 * vCC (r1). Useful for integer division and modulus. 5714 * 5715 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5716 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5717 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5718 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5719 */ 5720 /* binop/2addr vA, vB */ 5721 mov r9, rINST, lsr #8 @ r9<- A+ 5722 mov r3, rINST, lsr #12 @ r3<- B 5723 and r9, r9, #15 5724 GET_VREG(r1, r3) @ r1<- vB 5725 GET_VREG(r0, r9) @ r0<- vA 5726 .if 0 5727 cmp r1, #0 @ is second operand zero? 
5728 beq common_errDivideByZero 5729 .endif 5730 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5731 5732 @ optional op; may set condition codes 5733 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5734 GET_INST_OPCODE(ip) @ extract opcode from rINST 5735 SET_VREG(r0, r9) @ vAA<- r0 5736 GOTO_OPCODE(ip) @ jump to next instruction 5737 /* 10-13 instructions */ 5738 5739 5740/* ------------------------------ */ 5741 .balign 64 5742.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5743/* File: armv5te/OP_XOR_INT_2ADDR.S */ 5744/* File: armv5te/binop2addr.S */ 5745 /* 5746 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5747 * that specifies an instruction that performs "result = r0 op r1". 5748 * This could be an ARM instruction or a function call. (If the result 5749 * comes back in a register other than r0, you can override "result".) 5750 * 5751 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5752 * vCC (r1). Useful for integer division and modulus. 5753 * 5754 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5755 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5756 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5757 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5758 */ 5759 /* binop/2addr vA, vB */ 5760 mov r9, rINST, lsr #8 @ r9<- A+ 5761 mov r3, rINST, lsr #12 @ r3<- B 5762 and r9, r9, #15 5763 GET_VREG(r1, r3) @ r1<- vB 5764 GET_VREG(r0, r9) @ r0<- vA 5765 .if 0 5766 cmp r1, #0 @ is second operand zero? 
5767 beq common_errDivideByZero 5768 .endif 5769 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5770 5771 @ optional op; may set condition codes 5772 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5773 GET_INST_OPCODE(ip) @ extract opcode from rINST 5774 SET_VREG(r0, r9) @ vAA<- r0 5775 GOTO_OPCODE(ip) @ jump to next instruction 5776 /* 10-13 instructions */ 5777 5778 5779/* ------------------------------ */ 5780 .balign 64 5781.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5782/* File: armv5te/OP_SHL_INT_2ADDR.S */ 5783/* File: armv5te/binop2addr.S */ 5784 /* 5785 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5786 * that specifies an instruction that performs "result = r0 op r1". 5787 * This could be an ARM instruction or a function call. (If the result 5788 * comes back in a register other than r0, you can override "result".) 5789 * 5790 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5791 * vCC (r1). Useful for integer division and modulus. 5792 * 5793 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5794 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5795 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5796 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5797 */ 5798 /* binop/2addr vA, vB */ 5799 mov r9, rINST, lsr #8 @ r9<- A+ 5800 mov r3, rINST, lsr #12 @ r3<- B 5801 and r9, r9, #15 5802 GET_VREG(r1, r3) @ r1<- vB 5803 GET_VREG(r0, r9) @ r0<- vA 5804 .if 0 5805 cmp r1, #0 @ is second operand zero? 
5806 beq common_errDivideByZero 5807 .endif 5808 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5809 5810 and r1, r1, #31 @ optional op; may set condition codes 5811 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5812 GET_INST_OPCODE(ip) @ extract opcode from rINST 5813 SET_VREG(r0, r9) @ vAA<- r0 5814 GOTO_OPCODE(ip) @ jump to next instruction 5815 /* 10-13 instructions */ 5816 5817 5818/* ------------------------------ */ 5819 .balign 64 5820.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5821/* File: armv5te/OP_SHR_INT_2ADDR.S */ 5822/* File: armv5te/binop2addr.S */ 5823 /* 5824 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5825 * that specifies an instruction that performs "result = r0 op r1". 5826 * This could be an ARM instruction or a function call. (If the result 5827 * comes back in a register other than r0, you can override "result".) 5828 * 5829 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5830 * vCC (r1). Useful for integer division and modulus. 5831 * 5832 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5833 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5834 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5835 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5836 */ 5837 /* binop/2addr vA, vB */ 5838 mov r9, rINST, lsr #8 @ r9<- A+ 5839 mov r3, rINST, lsr #12 @ r3<- B 5840 and r9, r9, #15 5841 GET_VREG(r1, r3) @ r1<- vB 5842 GET_VREG(r0, r9) @ r0<- vA 5843 .if 0 5844 cmp r1, #0 @ is second operand zero? 
5845 beq common_errDivideByZero 5846 .endif 5847 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5848 5849 and r1, r1, #31 @ optional op; may set condition codes 5850 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5851 GET_INST_OPCODE(ip) @ extract opcode from rINST 5852 SET_VREG(r0, r9) @ vAA<- r0 5853 GOTO_OPCODE(ip) @ jump to next instruction 5854 /* 10-13 instructions */ 5855 5856 5857/* ------------------------------ */ 5858 .balign 64 5859.L_OP_USHR_INT_2ADDR: /* 0xba */ 5860/* File: armv5te/OP_USHR_INT_2ADDR.S */ 5861/* File: armv5te/binop2addr.S */ 5862 /* 5863 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5864 * that specifies an instruction that performs "result = r0 op r1". 5865 * This could be an ARM instruction or a function call. (If the result 5866 * comes back in a register other than r0, you can override "result".) 5867 * 5868 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5869 * vCC (r1). Useful for integer division and modulus. 5870 * 5871 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5872 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5873 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5874 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5875 */ 5876 /* binop/2addr vA, vB */ 5877 mov r9, rINST, lsr #8 @ r9<- A+ 5878 mov r3, rINST, lsr #12 @ r3<- B 5879 and r9, r9, #15 5880 GET_VREG(r1, r3) @ r1<- vB 5881 GET_VREG(r0, r9) @ r0<- vA 5882 .if 0 5883 cmp r1, #0 @ is second operand zero? 
5884 beq common_errDivideByZero 5885 .endif 5886 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5887 5888 and r1, r1, #31 @ optional op; may set condition codes 5889 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5890 GET_INST_OPCODE(ip) @ extract opcode from rINST 5891 SET_VREG(r0, r9) @ vAA<- r0 5892 GOTO_OPCODE(ip) @ jump to next instruction 5893 /* 10-13 instructions */ 5894 5895 5896/* ------------------------------ */ 5897 .balign 64 5898.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 5899/* File: armv5te/OP_ADD_LONG_2ADDR.S */ 5900/* File: armv5te/binopWide2addr.S */ 5901 /* 5902 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5903 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5904 * This could be an ARM instruction or a function call. (If the result 5905 * comes back in a register other than r0, you can override "result".) 5906 * 5907 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5908 * vCC (r1). Useful for integer division and modulus. 5909 * 5910 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5911 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5912 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5913 * rem-double/2addr 5914 */ 5915 /* binop/2addr vA, vB */ 5916 mov r9, rINST, lsr #8 @ r9<- A+ 5917 mov r1, rINST, lsr #12 @ r1<- B 5918 and r9, r9, #15 5919 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5920 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5921 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5922 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5923 .if 0 5924 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5925 beq common_errDivideByZero 5926 .endif 5927 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5928 5929 adds r0, r0, r2 @ optional op; may set condition codes 5930 adc r1, r1, r3 @ result<- op, r0-r3 changed 5931 GET_INST_OPCODE(ip) @ extract opcode from rINST 5932 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5933 GOTO_OPCODE(ip) @ jump to next instruction 5934 /* 12-15 instructions */ 5935 5936 5937/* ------------------------------ */ 5938 .balign 64 5939.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 5940/* File: armv5te/OP_SUB_LONG_2ADDR.S */ 5941/* File: armv5te/binopWide2addr.S */ 5942 /* 5943 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5944 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5945 * This could be an ARM instruction or a function call. (If the result 5946 * comes back in a register other than r0, you can override "result".) 5947 * 5948 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5949 * vCC (r1). Useful for integer division and modulus. 5950 * 5951 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5952 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5953 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5954 * rem-double/2addr 5955 */ 5956 /* binop/2addr vA, vB */ 5957 mov r9, rINST, lsr #8 @ r9<- A+ 5958 mov r1, rINST, lsr #12 @ r1<- B 5959 and r9, r9, #15 5960 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5961 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5962 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5963 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5964 .if 0 5965 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5966 beq common_errDivideByZero 5967 .endif 5968 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5969 5970 subs r0, r0, r2 @ optional op; may set condition codes 5971 sbc r1, r1, r3 @ result<- op, r0-r3 changed 5972 GET_INST_OPCODE(ip) @ extract opcode from rINST 5973 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5974 GOTO_OPCODE(ip) @ jump to next instruction 5975 /* 12-15 instructions */ 5976 5977 5978/* ------------------------------ */ 5979 .balign 64 5980.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 5981/* File: armv5te/OP_MUL_LONG_2ADDR.S */ 5982 /* 5983 * Signed 64-bit integer multiply, "/2addr" version. 5984 * 5985 * See OP_MUL_LONG for an explanation. 5986 * 5987 * We get a little tight on registers, so to avoid looking up &fp[A] 5988 * again we stuff it into rINST. 5989 */ 5990 /* mul-long/2addr vA, vB */ 5991 mov r9, rINST, lsr #8 @ r9<- A+ 5992 mov r1, rINST, lsr #12 @ r1<- B 5993 and r9, r9, #15 5994 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5995 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 5996 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5997 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 5998 mul ip, r2, r1 @ ip<- ZxW 5999 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 6000 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 6001 mov r0, rINST @ r0<- &fp[A] (free up rINST) 6002 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6003 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 6004 GET_INST_OPCODE(ip) @ extract opcode from rINST 6005 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 6006 GOTO_OPCODE(ip) @ jump to next instruction 6007 6008/* ------------------------------ */ 6009 .balign 64 6010.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 6011/* File: armv5te/OP_DIV_LONG_2ADDR.S */ 6012/* File: armv5te/binopWide2addr.S */ 6013 /* 6014 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6015 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6016 * This could be an ARM instruction or a function call. (If the result 6017 * comes back in a register other than r0, you can override "result".) 
6018 * 6019 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6020 * vCC (r1). Useful for integer division and modulus. 6021 * 6022 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6023 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6024 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6025 * rem-double/2addr 6026 */ 6027 /* binop/2addr vA, vB */ 6028 mov r9, rINST, lsr #8 @ r9<- A+ 6029 mov r1, rINST, lsr #12 @ r1<- B 6030 and r9, r9, #15 6031 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6032 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6033 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6034 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6035 .if 1 6036 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6037 beq common_errDivideByZero 6038 .endif 6039 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6040 6041 @ optional op; may set condition codes 6042 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 6043 GET_INST_OPCODE(ip) @ extract opcode from rINST 6044 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6045 GOTO_OPCODE(ip) @ jump to next instruction 6046 /* 12-15 instructions */ 6047 6048 6049/* ------------------------------ */ 6050 .balign 64 6051.L_OP_REM_LONG_2ADDR: /* 0xbf */ 6052/* File: armv5te/OP_REM_LONG_2ADDR.S */ 6053/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 6054/* File: armv5te/binopWide2addr.S */ 6055 /* 6056 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6057 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6058 * This could be an ARM instruction or a function call. (If the result 6059 * comes back in a register other than r0, you can override "result".) 6060 * 6061 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6062 * vCC (r1). Useful for integer division and modulus. 
 *
 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
 *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
 *      sub-double/2addr, mul-double/2addr, div-double/2addr,
 *      rem-double/2addr
 */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vA/vA+1<- r2/r3 (ldivmod remainder)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG_2ADDR: /* 0xc0 */
/* File: armv5te/OP_AND_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * 64-bit "/2addr" binary op (binopWide2addr.S template instance):
     * vA/vA+1 <- (vA/vA+1) AND (vB/vB+1); chkzero=0, so the divide-by-zero
     * check below is assembled out.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ optional op; may set condition codes
    and     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG_2ADDR: /* 0xc1 */
/* File: armv5te/OP_OR_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * 64-bit "/2addr" binary op (binopWide2addr.S template instance):
     * vA/vA+1 <- (vA/vA+1) OR (vB/vB+1); chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ optional op; may set condition codes
    orr     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
/* File: armv5te/OP_XOR_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * 64-bit "/2addr" binary op (binopWide2addr.S template instance):
     * vA/vA+1 <- (vA/vA+1) XOR (vB/vB+1); chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
/* File: armv5te/OP_SHL_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
     * 32-bit shift distance.
     */
    /* shl-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    GET_VREG(r2, r3)                    @ r2<- vB (shift distance)
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    b       .LOP_SHL_LONG_2ADDR_finish  @ store r0/r1 to vA and dispatch

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
/* File: armv5te/OP_SHR_LONG_2ADDR.S */
    /*
     * Long integer shift (arithmetic), 2addr version.  vA is 64-bit
     * value/result, vB is 32-bit shift distance.
     */
    /* shr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    GET_VREG(r2, r3)                    @ r2<- vB (shift distance)
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    mov     r1, r1, asr r2              @ r1<- r1 >> r2 (arithmetic)
    b       .LOP_SHR_LONG_2ADDR_finish  @ store r0/r1 to vA and dispatch

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
/* File: armv5te/OP_USHR_LONG_2ADDR.S */
    /*
     * Long integer shift (logical/unsigned), 2addr version.  vA is 64-bit
     * value/result, vB is 32-bit shift distance.
     */
    /* ushr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    GET_VREG(r2, r3)                    @ r2<- vB (shift distance)
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2 (logical)
    b       .LOP_USHR_LONG_2ADDR_finish @ store r0/r1 to vA and dispatch

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
/* File: armv5te/OP_ADD_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * 32-bit "/2addr" binary op (binop2addr.S template instance):
     * vA <- vA op vB via __aeabi_fadd (soft-float add); chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fadd                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
/* File: armv5te/OP_SUB_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * 32-bit "/2addr" binary op (binop2addr.S template instance):
     * vA <- vA op vB via __aeabi_fsub (soft-float subtract); chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fsub                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
/* File: armv5te/OP_MUL_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * 32-bit "/2addr" binary op (binop2addr.S template instance):
     * vA <- vA op vB via __aeabi_fmul (soft-float multiply); chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fmul                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
/* File: armv5te/OP_DIV_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * 32-bit "/2addr" binary op (binop2addr.S template instance):
     * vA <- vA op vB via __aeabi_fdiv (soft-float divide); chkzero=0
     * (float division by zero yields inf/NaN, not an exception).
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fdiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT_2ADDR: /* 0xca */
/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop2addr.S */
    /*
     * 32-bit "/2addr" binary op (binop2addr.S template instance):
     * vA <- vA op vB via libm fmodf; chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
/* File: armv5te/OP_ADD_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * 64-bit "/2addr" binary op (binopWide2addr.S template instance):
     * vA/vA+1 <- vA op vB via __aeabi_dadd (soft-double add); chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dadd                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
/* File: armv5te/OP_SUB_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * 64-bit "/2addr" binary op (binopWide2addr.S template instance):
     * vA/vA+1 <- vA op vB via __aeabi_dsub (soft-double subtract); chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dsub                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
/* File: armv5te/OP_MUL_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * 64-bit "/2addr" binary op (binopWide2addr.S template instance):
     * vA/vA+1 <- vA op vB via __aeabi_dmul (soft-double multiply); chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dmul                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
/* File: armv5te/OP_DIV_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * 64-bit "/2addr" binary op (binopWide2addr.S template instance):
     * vA/vA+1 <- vA op vB via __aeabi_ddiv (soft-double divide); chkzero=0
     * (double division by zero yields inf/NaN, not an exception).
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ddiv                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide2addr.S */
    /*
     * 64-bit "/2addr" binary op (binopWide2addr.S template instance):
     * vA/vA+1 <- vA op vB via libm fmod; chkzero=0.
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT16: /* 0xd0 */
/* File: armv5te/OP_ADD_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * 32-bit "lit16" binary op (binopLit16.S template instance):
     * vA <- vB + ssssCCCC (sign-extended literal); chkzero=0.
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT: /* 0xd1 */
/* File: armv5te/OP_RSUB_INT.S */
/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
/* File: armv5te/binopLit16.S */
    /*
     * 32-bit "lit16" binary op (binopLit16.S template instance):
     * vA <- ssssCCCC - vB (reverse subtract); chkzero=0.
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    rsb     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT16: /* 0xd2 */
/* File: armv5te/OP_MUL_INT_LIT16.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit16.S */
    /*
     * 32-bit "lit16" binary op (binopLit16.S template instance):
     * vA <- vB * ssssCCCC; chkzero=0.
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT16: /* 0xd3 */
/* File: armv5te/OP_DIV_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * 32-bit "lit16" binary op (binopLit16.S template instance):
     * vA <- vB / ssssCCCC via __aeabi_idiv; chkzero=1, so a zero literal
     * throws before the PC is advanced.
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT16: /* 0xd4 */
/* File: armv5te/OP_REM_INT_LIT16.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit16.S */
    /*
     * 32-bit "lit16" binary op (binopLit16.S template instance):
     * vA <- vB % ssssCCCC via __aeabi_idivmod (result taken from r1);
     * chkzero=1.
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT16: /* 0xd5 */
/* File: armv5te/OP_AND_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * 32-bit "lit16" binary op (binopLit16.S template instance):
     * vA <- vB AND ssssCCCC; chkzero=0.
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT16: /* 0xd6 */
/* File: armv5te/OP_OR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * 32-bit "lit16" binary op (binopLit16.S template instance):
     * vA <- vB OR ssssCCCC; chkzero=0.
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT16: /* 0xd7 */
/* File: armv5te/OP_XOR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * 32-bit "lit16" binary op (binopLit16.S template instance):
     * vA <- vB XOR ssssCCCC; chkzero=0.
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ r9<- A (strip high nibble)
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT8: /* 0xd8 */
/* File: armv5te/OP_ADD_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
6998 * 6999 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7000 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7001 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7002 */ 7003 /* binop/lit8 vAA, vBB, #+CC */ 7004 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7005 mov r9, rINST, lsr #8 @ r9<- AA 7006 and r2, r3, #255 @ r2<- BB 7007 GET_VREG(r0, r2) @ r0<- vBB 7008 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7009 .if 0 7010 @cmp r1, #0 @ is second operand zero? 7011 beq common_errDivideByZero 7012 .endif 7013 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7014 7015 @ optional op; may set condition codes 7016 add r0, r0, r1 @ r0<- op, r0-r3 changed 7017 GET_INST_OPCODE(ip) @ extract opcode from rINST 7018 SET_VREG(r0, r9) @ vAA<- r0 7019 GOTO_OPCODE(ip) @ jump to next instruction 7020 /* 10-12 instructions */ 7021 7022 7023/* ------------------------------ */ 7024 .balign 64 7025.L_OP_RSUB_INT_LIT8: /* 0xd9 */ 7026/* File: armv5te/OP_RSUB_INT_LIT8.S */ 7027/* File: armv5te/binopLit8.S */ 7028 /* 7029 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7030 * that specifies an instruction that performs "result = r0 op r1". 7031 * This could be an ARM instruction or a function call. (If the result 7032 * comes back in a register other than r0, you can override "result".) 7033 * 7034 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7035 * vCC (r1). Useful for integer division and modulus. 7036 * 7037 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7038 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7039 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7040 */ 7041 /* binop/lit8 vAA, vBB, #+CC */ 7042 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7043 mov r9, rINST, lsr #8 @ r9<- AA 7044 and r2, r3, #255 @ r2<- BB 7045 GET_VREG(r0, r2) @ r0<- vBB 7046 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7047 .if 0 7048 @cmp r1, #0 @ is second operand zero? 
7049 beq common_errDivideByZero 7050 .endif 7051 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7052 7053 @ optional op; may set condition codes 7054 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 7055 GET_INST_OPCODE(ip) @ extract opcode from rINST 7056 SET_VREG(r0, r9) @ vAA<- r0 7057 GOTO_OPCODE(ip) @ jump to next instruction 7058 /* 10-12 instructions */ 7059 7060 7061/* ------------------------------ */ 7062 .balign 64 7063.L_OP_MUL_INT_LIT8: /* 0xda */ 7064/* File: armv5te/OP_MUL_INT_LIT8.S */ 7065/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 7066/* File: armv5te/binopLit8.S */ 7067 /* 7068 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7069 * that specifies an instruction that performs "result = r0 op r1". 7070 * This could be an ARM instruction or a function call. (If the result 7071 * comes back in a register other than r0, you can override "result".) 7072 * 7073 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7074 * vCC (r1). Useful for integer division and modulus. 7075 * 7076 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7077 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7078 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7079 */ 7080 /* binop/lit8 vAA, vBB, #+CC */ 7081 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7082 mov r9, rINST, lsr #8 @ r9<- AA 7083 and r2, r3, #255 @ r2<- BB 7084 GET_VREG(r0, r2) @ r0<- vBB 7085 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7086 .if 0 7087 @cmp r1, #0 @ is second operand zero? 
7088 beq common_errDivideByZero 7089 .endif 7090 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7091 7092 @ optional op; may set condition codes 7093 mul r0, r1, r0 @ r0<- op, r0-r3 changed 7094 GET_INST_OPCODE(ip) @ extract opcode from rINST 7095 SET_VREG(r0, r9) @ vAA<- r0 7096 GOTO_OPCODE(ip) @ jump to next instruction 7097 /* 10-12 instructions */ 7098 7099 7100/* ------------------------------ */ 7101 .balign 64 7102.L_OP_DIV_INT_LIT8: /* 0xdb */ 7103/* File: armv5te/OP_DIV_INT_LIT8.S */ 7104/* File: armv5te/binopLit8.S */ 7105 /* 7106 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7107 * that specifies an instruction that performs "result = r0 op r1". 7108 * This could be an ARM instruction or a function call. (If the result 7109 * comes back in a register other than r0, you can override "result".) 7110 * 7111 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7112 * vCC (r1). Useful for integer division and modulus. 7113 * 7114 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7115 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7116 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7117 */ 7118 /* binop/lit8 vAA, vBB, #+CC */ 7119 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7120 mov r9, rINST, lsr #8 @ r9<- AA 7121 and r2, r3, #255 @ r2<- BB 7122 GET_VREG(r0, r2) @ r0<- vBB 7123 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7124 .if 1 7125 @cmp r1, #0 @ is second operand zero? 
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT8: /* 0xdc */
/* File: armv5te/OP_REM_INT_LIT8.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit8.S */
    /*
     * armv5te/binopLit8.S template with instr = "bl __aeabi_idivmod",
     * result taken from r1 (the remainder), and chkzero = 1:
     * vAA <- vBB % #+CC, throwing on a zero divisor.
     * The ".if 0/.if 1" region is the template's divide-by-zero check.
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 1                               @ chkzero enabled for "rem"
    @cmp    r1, #0                      @ elided: "movs" above already set Z
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT8: /* 0xdd */
/* File: armv5te/OP_AND_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /* binopLit8.S template with instr = "and": vAA <- vBB & #+CC. */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0                               @ chkzero disabled for "and"
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT8: /* 0xde */
/* File: armv5te/OP_OR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /* binopLit8.S template with instr = "orr": vAA <- vBB | #+CC. */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0                               @ chkzero disabled for "or"
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT8: /* 0xdf */
/* File: armv5te/OP_XOR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /* binopLit8.S template with instr = "eor": vAA <- vBB ^ #+CC. */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0                               @ chkzero disabled for "xor"
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT_LIT8: /* 0xe0 */
/* File: armv5te/OP_SHL_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * binopLit8.S template with preinstr = "and r1, r1, #31" and
     * instr = "mov r0, r0, asl r1": vAA <- vBB << (#+CC & 31).
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0                               @ chkzero disabled for "shl"
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_LIT8: /* 0xe1 */
/* File: armv5te/OP_SHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * armv5te/binopLit8.S template with preinstr = "and r1, r1, #31"
     * and instr = "mov r0, r0, asr r1" (arithmetic shift):
     * vAA <- vBB >> (#+CC & 31).  The ".if 0" region is the template's
     * divide-by-zero check, assembled only for div/rem variants.
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0                               @ chkzero disabled for "shr"
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_LIT8: /* 0xe2 */
/* File: armv5te/OP_USHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * binopLit8.S template with instr = "mov r0, r0, lsr r1" (logical
     * shift): vAA <- vBB >>> (#+CC & 31).
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0                               @ chkzero disabled for "ushr"
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E3: /* 0xe3 */
/* File: armv5te/OP_UNUSED_E3.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E4: /* 0xe4 */
/* File: armv5te/OP_UNUSED_E4.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E5: /* 0xe5 */
/* File: armv5te/OP_UNUSED_E5.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E6: /* 0xe6 */
/* File: armv5te/OP_UNUSED_E6.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E7: /* 0xe7 */
/* File: armv5te/OP_UNUSED_E7.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
/* File: armv5te/OP_IGET_WIDE_VOLATILE.S */
/* File: armv5te/OP_IGET_WIDE.S */
    /*
     * Wide (64-bit) instance field get.  Fast path branches to the
     * out-of-line _finish handler; slow path resolves the field first.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_VOLATILE_finish @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_WIDE_VOLATILE_finish
    b       common_exceptionThrown      @ no: exception is pending


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
/* File: armv5te/OP_IPUT_WIDE_VOLATILE.S */
/* File: armv5te/OP_IPUT_WIDE.S */
    /*
     * Wide (64-bit) instance field put.  Fast path branches to the
     * out-of-line _finish handler; slow path resolves the field first.
     */
    /* iput-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_WIDE_VOLATILE_finish @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_WIDE_VOLATILE_finish @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET_WIDE_VOLATILE: /* 0xea */
/* File: armv5te/OP_SGET_WIDE_VOLATILE.S */
/* File: armv5te/OP_SGET_WIDE.S */
    /*
     * 64-bit SGET handler.  The volatile flavor (".if 1") reads the
     * field through the 64-bit quasiatomic helper instead of ldrd.
     */
    /* sget-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_WIDE_VOLATILE_resolve @ yes, do resolve
.LOP_SGET_WIDE_VOLATILE_finish:
    mov     r9, rINST, lsr #8           @ r9<- AA
    .if 1                               @ volatile: atomic 64-bit read
    add     r0, r0, #offStaticField_value @ r0<- pointer to data
    bl      android_quasiatomic_read_64 @ r0/r1<- contents of field
    .else
    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
    .endif
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
/* File: armv5te/OP_SPUT_WIDE_VOLATILE.S */
/* File: armv5te/OP_SPUT_WIDE.S */
    /*
     * 64-bit SPUT handler.  The volatile flavor (".if 1") writes the
     * field through the 64-bit quasiatomic helper instead of strd.
     */
    /* sput-wide vAA, field@BBBB */
    ldr     r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    cmp     r2, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_WIDE_VOLATILE_resolve @ yes, do resolve
.LOP_SPUT_WIDE_VOLATILE_finish: @ field ptr in r2, AA in r9
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    GET_INST_OPCODE(r10)                @ extract opcode from rINST
                                        @ (r10, not ip: ip may be clobbered
                                        @ by the helper call below)
    .if 1                               @ volatile: atomic 64-bit write
    add     r2, r2, #offStaticField_value @ r2<- pointer to data
    bl      android_quasiatomic_swap_64 @ stores r0/r1 into addr r2
    .else
    strd    r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1
    .endif
    GOTO_OPCODE(r10)                    @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_BREAKPOINT: /* 0xec */
/* File: armv5te/OP_BREAKPOINT.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ not handled here: abort


/* ------------------------------ */
    .balign 64
.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */
    /*
     * Handle a throw-verification-error instruction.  This throws an
     * exception for an error discovered during verification.  The
     * exception is indicated by AA, with some detail provided by BBBB.
     */
    /* op AA, ref@BBBB */
    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
    FETCH(r2, 1)                        @ r2<- BBBB
    EXPORT_PC()                         @ export the PC
    mov     r1, rINST, lsr #8           @ r1<- AA
    bl      dvmThrowVerificationError   @ always throws
    b       common_exceptionThrown      @ handle exception

/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE: /* 0xee */
/* File: armv5te/OP_EXECUTE_INLINE.S */
    /*
     * Execute a "native inline" instruction.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
     */
    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
    FETCH(r10, 1)                       @ r10<- BBBB
    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
    EXPORT_PC()                         @ can throw
    sub     sp, sp, #8                  @ make room for arg, +64 bit align
    mov     r0, rINST, lsr #12          @ r0<- B
    str     r1, [sp]                    @ push &glue->retval
    bl      .LOP_EXECUTE_INLINE_continue @ make call; will return after
    add     sp, sp, #8                  @ pop stack
    cmp     r0, #0                      @ test boolean result of inline
    beq     common_exceptionThrown      @ returned false, handle exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */
    /*
     * Execute a "native inline" instruction, using "/range" semantics.
     * Same idea as execute-inline, but we get the args differently.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
     */
    /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
    FETCH(r10, 1)                       @ r10<- BBBB
    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
    EXPORT_PC()                         @ can throw
    sub     sp, sp, #8                  @ make room for arg, +64 bit align
    mov     r0, rINST, lsr #8           @ r0<- AA
    str     r1, [sp]                    @ push &glue->retval
    bl      .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
    add     sp, sp, #8                  @ pop stack
    cmp     r0, #0                      @ test boolean result of inline
    beq     common_exceptionThrown      @ returned false, handle exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
    /*
     * invoke-direct-empty is a no-op in a "standard" interpreter.
     */
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_F1: /* 0xf1 */
/* File: armv5te/OP_UNUSED_F1.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_IGET_QUICK: /* 0xf2 */
/* File: armv5te/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A (strip high nibble)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
/* File: armv5te/OP_IGET_WIDE_QUICK.S */
    /* iget-wide-quick vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(ip, 1)                        @ ip<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldrd    r0, [r3, ip]                @ r0<- obj.field (64 bits, aligned)
    and     r2, r2, #15                 @ r2<- A (strip high nibble)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
/* File: armv5te/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A (strip high nibble)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_QUICK: /* 0xf5 */
/* File: armv5te/OP_IPUT_QUICK.S */
    /* For: iput-quick, iput-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    and     r2, r2, #15                 @ r2<- A (strip high nibble)
    GET_VREG(r0, r2)                    @ r0<- fp[A]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
/* File: armv5te/OP_IPUT_WIDE_QUICK.S */
    /* iput-wide-quick vA, vB, offset@CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A(+)
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A (strip high nibble)
    GET_VREG(r2, r1)                    @ r2<- fp[B], the object pointer
    add     r3, rFP, r0, lsl #2         @ r3<- &fp[A]
    cmp     r2, #0                      @ check object for null
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH(r3, 1)                        @ r3<- field byte offset
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    strd    r0, [r2, r3]                @ obj.field (64 bits, aligned)<- r0/r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */
/* File: armv5te/OP_IPUT_QUICK.S */
    /* For: iput-quick, iput-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    and     r2, r2, #15                 @ r2<- A (strip high nibble)
    GET_VREG(r0, r2)                    @ r0<- fp[A]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
7777.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */ 7778/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7779 /* 7780 * Handle an optimized virtual method call. 7781 * 7782 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7783 */ 7784 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7785 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7786 FETCH(r3, 2) @ r3<- FEDC or CCCC 7787 FETCH(r1, 1) @ r1<- BBBB 7788 .if (!0) 7789 and r3, r3, #15 @ r3<- C (or stays CCCC) 7790 .endif 7791 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7792 cmp r2, #0 @ is "this" null? 7793 beq common_errNullObject @ null "this", throw exception 7794 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7795 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7796 EXPORT_PC() @ invoke must export 7797 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7798 bl common_invokeMethodNoRange @ continue on 7799 7800/* ------------------------------ */ 7801 .balign 64 7802.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */ 7803/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */ 7804/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7805 /* 7806 * Handle an optimized virtual method call. 7807 * 7808 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7809 */ 7810 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7811 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7812 FETCH(r3, 2) @ r3<- FEDC or CCCC 7813 FETCH(r1, 1) @ r1<- BBBB 7814 .if (!1) 7815 and r3, r3, #15 @ r3<- C (or stays CCCC) 7816 .endif 7817 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7818 cmp r2, #0 @ is "this" null? 
7819 beq common_errNullObject @ null "this", throw exception 7820 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7821 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7822 EXPORT_PC() @ invoke must export 7823 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7824 bl common_invokeMethodRange @ continue on 7825 7826 7827/* ------------------------------ */ 7828 .balign 64 7829.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */ 7830/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7831 /* 7832 * Handle an optimized "super" method call. 7833 * 7834 * for: [opt] invoke-super-quick, invoke-super-quick/range 7835 */ 7836 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7837 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7838 FETCH(r10, 2) @ r10<- GFED or CCCC 7839 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7840 .if (!0) 7841 and r10, r10, #15 @ r10<- D (or stays CCCC) 7842 .endif 7843 FETCH(r1, 1) @ r1<- BBBB 7844 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7845 EXPORT_PC() @ must export for invoke 7846 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7847 GET_VREG(r3, r10) @ r3<- "this" 7848 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7849 cmp r3, #0 @ null "this" ref? 7850 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7851 beq common_errNullObject @ "this" is null, throw exception 7852 bl common_invokeMethodNoRange @ continue on 7853 7854/* ------------------------------ */ 7855 .balign 64 7856.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */ 7857/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */ 7858/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7859 /* 7860 * Handle an optimized "super" method call. 
     *
     * for: [opt] invoke-super-quick, invoke-super-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    .if (!1)                            @ range template: CCCC used whole
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB (vtable index)
    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    ldr     r2, [r2, #offClassObject_super]     @ r2<- method->clazz->super
    GET_VREG(r3, r10)                   @ r3<- "this"
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
    cmp     r3, #0                      @ null "this" ref?
    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
    beq     common_errNullObject        @ "this" is null, throw exception
    bl      common_invokeMethodRange    @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FC: /* 0xfc */
/* File: armv5te/OP_UNUSED_FC.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: must never execute


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FD: /* 0xfd */
/* File: armv5te/OP_UNUSED_FD.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: must never execute


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FE: /* 0xfe */
/* File: armv5te/OP_UNUSED_FE.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: must never execute


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FF: /* 0xff */
/* File: armv5te/OP_UNUSED_FF.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: must never execute



    .balign 64
    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
    .global dvmAsmInstructionEnd
dvmAsmInstructionEnd:

/*
 * ===========================================================================
 * Sister implementations
 * ===========================================================================
 */
    .global dvmAsmSisterStart
    .type   dvmAsmSisterStart, %function
    .text
    .balign 4
dvmAsmSisterStart:

/* continuation for OP_CONST_STRING */

    /*
     * Continuation if the String has not yet been resolved.
     *  r1: BBBB (String ref)
     *  r9: target register
     */
.LOP_CONST_STRING_resolve:
    EXPORT_PC()                         @ dvmResolveString can throw
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveString            @ r0<- String reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CONST_STRING_JUMBO */

    /*
     * Continuation if the String has not yet been resolved.
     *  r1: BBBBBBBB (String ref)
     *  r9: target register
     */
.LOP_CONST_STRING_JUMBO_resolve:
    EXPORT_PC()                         @ dvmResolveString can throw
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveString            @ r0<- String reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(3)               @ advance rPC (3-unit insn), load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CONST_CLASS */

    /*
     * Continuation if the Class has not yet been resolved.
     *  r1: BBBB (Class ref)
     *  r9: target register
     */
.LOP_CONST_CLASS_resolve:
    EXPORT_PC()                         @ dvmResolveClass can throw
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    mov     r2, #1                      @ r2<- true
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- Class reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_CHECK_CAST */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    cmp     r0, #0                      @ failed?
    bne     .LOP_CHECK_CAST_okay        @ no, success

    @ A cast has failed.  We need to throw a ClassCastException with the
    @ class of the object that failed to be cast.
    EXPORT_PC()                         @ about to throw
    ldr     r3, [r9, #offObject_clazz]  @ r3<- obj->clazz
    ldr     r0, .LstrClassCastExceptionPtr
    ldr     r1, [r3, #offClassObject_descriptor]    @ r1<- obj->clazz->descriptor
    bl      dvmThrowExceptionWithClassMessage
    b       common_exceptionThrown

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r2 holds BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r1, r2                      @ r1<- BBBB
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBB
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_CHECK_CAST_resolved    @ pick up where we left off

.LstrClassCastExceptionPtr:
    .word   .LstrClassCastException

/* continuation for OP_INSTANCE_OF */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    @ fall through to OP_INSTANCE_OF_store

    /*
     * r0 holds boolean result
     * r9 holds A
     */
.LOP_INSTANCE_OF_store:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Trivial test succeeded, save and bail.
     *  r9 holds A
     */
.LOP_INSTANCE_OF_trivial:
    mov     r0, #1                      @ indicate success
    @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r3 holds BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    mov     r1, r3                      @ r1<- BBBB
    mov     r2, #1                      @ r2<- true
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBB
    mov     r3, rINST, lsr #12          @ r3<- B
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_INSTANCE_OF_resolved   @ pick up where we left off

/* continuation for OP_NEW_INSTANCE */

    .balign 32                          @ minimize cache lines
.LOP_NEW_INSTANCE_finish: @ r0=new object
    mov     r3, rINST, lsr #8           @ r3<- AA
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Class initialization required.
     *
     *  r0 holds class object
     */
.LOP_NEW_INSTANCE_needinit:
    mov     r9, r0                      @ save r0
    bl      dvmInitClass                @ initialize class
    cmp     r0, #0                      @ check boolean result
    mov     r0, r9                      @ restore r0 (mov doesn't touch flags)
    bne     .LOP_NEW_INSTANCE_initialized   @ success, continue
    b       common_exceptionThrown      @ failed, deal with init exception

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r1 holds BBBB
     */
.LOP_NEW_INSTANCE_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    bne     .LOP_NEW_INSTANCE_resolved  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

.LstrInstantiationErrorPtr:
    .word   .LstrInstantiationError

/* continuation for OP_NEW_ARRAY */


    /*
     * Resolve class.  (This is an uncommon case.)
     *
     *  r1 holds array length
     *  r2 holds class ref CCCC
     */
.LOP_NEW_ARRAY_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r9, r1                      @ r9<- length (save)
    mov     r1, r2                      @ r1<- CCCC
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    mov     r1, r9                      @ r1<- length (restore)
    beq     common_exceptionThrown      @ yes, handle exception
    @ fall through to OP_NEW_ARRAY_finish

    /*
     * Finish allocation.
     *
     *  r0 holds class
     *  r1 holds array length
     */
.LOP_NEW_ARRAY_finish:
    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
    cmp     r0, #0                      @ failed?
    mov     r2, rINST, lsr #8           @ r2<- A+
    beq     common_exceptionThrown      @ yes, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_FILLED_NEW_ARRAY */

    /*
     * On entry:
     *  r0 holds array class
     *  r10 holds AA or BA
     */
.LOP_FILLED_NEW_ARRAY_continue:
    ldr     r3, [r0, #offClassObject_descriptor]    @ r3<- arrayClass->descriptor
    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
    ldrb    r3, [r3, #1]                @ r3<- descriptor[1] (element type char)
    .if 0                               @ range form (disabled in this template)
    mov     r1, r10                     @ r1<- AA (length)
    .else
    mov     r1, r10, lsr #4             @ r1<- B (length)
    .endif
    cmp     r3, #'I'                    @ array of ints?
    cmpne   r3, #'L'                    @ array of objects?
    cmpne   r3, #'['                    @ array of arrays?
    mov     r9, r1                      @ save length in r9
    bne     .LOP_FILLED_NEW_ARRAY_notimpl   @ no, not handled yet
    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
    cmp     r0, #0                      @ null return?
    beq     common_exceptionThrown      @ alloc failed, handle exception

    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
    str     r0, [rGLUE, #offGlue_retval]    @ retval.l <- new array
    add     r0, r0, #offArrayObject_contents    @ r0<- newArray->contents
    subs    r9, r9, #1                  @ length--, check for neg
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    bmi     2f                          @ was zero, bail

    @ copy values from registers into the array
    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
    .if 0                               @ range form: copy a contiguous run
    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
1:  ldr     r3, [r2], #4                @ r3<- *r2++
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .else                               @ non-range: vA is the 5th arg, if any
    cmp     r9, #4                      @ length was initially 5?
    and     r2, r10, #15                @ r2<- A
    bne     1f                          @ <= 4 args, branch
    GET_VREG(r3, r2)                    @ r3<- vA
    sub     r9, r9, #1                  @ count--
    str     r3, [r0, #16]               @ contents[4] = vA
1:  and     r2, r1, #15                 @ r2<- F/E/D/C
    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .endif

2:
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

    /*
     * Throw an exception indicating that we have not implemented this
     * mode of filled-new-array.
     */
.LOP_FILLED_NEW_ARRAY_notimpl:
    ldr     r0, .L_strInternalError
    ldr     r1, .L_strFilledNewArrayNotImpl
    bl      dvmThrowException
    b       common_exceptionThrown

    .if (!0)                            @ define in one or the other, not both
.L_strFilledNewArrayNotImpl:
    .word   .LstrFilledNewArrayNotImpl
.L_strInternalError:
    .word   .LstrInternalError
    .endif

/* continuation for OP_FILLED_NEW_ARRAY_RANGE */

    /*
     * On entry:
     *  r0 holds array class
     *  r10 holds AA or BA
     */
.LOP_FILLED_NEW_ARRAY_RANGE_continue:
    ldr     r3, [r0, #offClassObject_descriptor]    @ r3<- arrayClass->descriptor
    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
    ldrb    r3, [r3, #1]                @ r3<- descriptor[1] (element type char)
    .if 1                               @ range form (enabled in this template)
    mov     r1, r10                     @ r1<- AA (length)
    .else
    mov     r1, r10, lsr #4             @ r1<- B (length)
    .endif
    cmp     r3, #'I'                    @ array of ints?
    cmpne   r3, #'L'                    @ array of objects?
    cmpne   r3, #'['                    @ array of arrays?
    mov     r9, r1                      @ save length in r9
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet
    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
    cmp     r0, #0                      @ null return?
    beq     common_exceptionThrown      @ alloc failed, handle exception

    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
    str     r0, [rGLUE, #offGlue_retval]    @ retval.l <- new array
    add     r0, r0, #offArrayObject_contents    @ r0<- newArray->contents
    subs    r9, r9, #1                  @ length--, check for neg
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    bmi     2f                          @ was zero, bail

    @ copy values from registers into the array
    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
    .if 1                               @ range form: copy a contiguous run
    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
1:  ldr     r3, [r2], #4                @ r3<- *r2++
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .else
    cmp     r9, #4                      @ length was initially 5?
    and     r2, r10, #15                @ r2<- A
    bne     1f                          @ <= 4 args, branch
    GET_VREG(r3, r2)                    @ r3<- vA
    sub     r9, r9, #1                  @ count--
    str     r3, [r0, #16]               @ contents[4] = vA
1:  and     r2, r1, #15                 @ r2<- F/E/D/C
    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .endif

2:
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

    /*
     * Throw an exception indicating that we have not implemented this
     * mode of filled-new-array.
     */
.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
    ldr     r0, .L_strInternalError
    ldr     r1, .L_strFilledNewArrayNotImpl
    bl      dvmThrowException
    b       common_exceptionThrown

    .if (!1)                            @ define in one or the other, not both
.L_strFilledNewArrayNotImpl:
    .word   .LstrFilledNewArrayNotImpl
.L_strInternalError:
    .word   .LstrInternalError
    .endif

/* continuation for OP_CMPL_FLOAT */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPL_FLOAT_gt_or_nan:
    mov     r1, r9                      @ reverse order
    mov     r0, r10
    bl      __aeabi_cfcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPL_FLOAT_finish
    mvn     r1, #0                      @ r1<- -1 for NaN (cmpl biases low)
    b       .LOP_CMPL_FLOAT_finish


#if 0       /* "classic" form */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpeq              @ r0<- (vBB == vCC)
    cmp     r0, #0                      @ equal?
    movne   r1, #0                      @ yes, result is 0
    bne     OP_CMPL_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmplt              @ r0<- (vBB < vCC)
    cmp     r0, #0                      @ less than?
    b       OP_CMPL_FLOAT_continue
@%break

OP_CMPL_FLOAT_continue:
    mvnne   r1, #0                      @ yes, result is -1
    bne     OP_CMPL_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpgt              @ r0<- (vBB > vCC)
    cmp     r0, #0                      @ greater than?
    beq     OP_CMPL_FLOAT_nan           @ no, must be NaN
    mov     r1, #1                      @ yes, result is 1
    @ fall through to _finish

OP_CMPL_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * This is expected to be uncommon, so we double-branch (once to here,
     * again back to _finish).
     */
OP_CMPL_FLOAT_nan:
    mvn     r1, #0                      @ r1<- -1 for NaN (cmpl biases low)
    b       OP_CMPL_FLOAT_finish

#endif

/* continuation for OP_CMPG_FLOAT */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPG_FLOAT_gt_or_nan:
    mov     r1, r9                      @ reverse order
    mov     r0, r10
    bl      __aeabi_cfcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPG_FLOAT_finish
    mov     r1, #1                      @ r1<- 1 for NaN (cmpg biases high)
    b       .LOP_CMPG_FLOAT_finish


#if 0       /* "classic" form */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpeq              @ r0<- (vBB == vCC)
    cmp     r0, #0                      @ equal?
    movne   r1, #0                      @ yes, result is 0
    bne     OP_CMPG_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmplt              @ r0<- (vBB < vCC)
    cmp     r0, #0                      @ less than?
    b       OP_CMPG_FLOAT_continue
@%break

OP_CMPG_FLOAT_continue:
    mvnne   r1, #0                      @ yes, result is -1
    bne     OP_CMPG_FLOAT_finish
    mov     r0, r9                      @ r0<- vBB
    mov     r1, r10                     @ r1<- vCC
    bl      __aeabi_fcmpgt              @ r0<- (vBB > vCC)
    cmp     r0, #0                      @ greater than?
    beq     OP_CMPG_FLOAT_nan           @ no, must be NaN
    mov     r1, #1                      @ yes, result is 1
    @ fall through to _finish

OP_CMPG_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * This is expected to be uncommon, so we double-branch (once to here,
     * again back to _finish).
     */
OP_CMPG_FLOAT_nan:
    mov     r1, #1                      @ r1<- 1 for NaN (cmpg biases high)
    b       OP_CMPG_FLOAT_finish

#endif

/* continuation for OP_CMPL_DOUBLE */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPL_DOUBLE_gt_or_nan:
    ldmia   r10, {r0-r1}                @ reverse order
    ldmia   r9, {r2-r3}
    bl      __aeabi_cdcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPL_DOUBLE_finish
    mvn     r1, #0                      @ r1<- -1 for NaN (cmpl biases low)
    b       .LOP_CMPL_DOUBLE_finish

/* continuation for OP_CMPG_DOUBLE */

    @ Test for NaN with a second comparison.  EABI forbids testing bit
    @ patterns, and we can't represent 0x7fc00000 in immediate form, so
    @ make the library call.
.LOP_CMPG_DOUBLE_gt_or_nan:
    ldmia   r10, {r0-r1}                @ reverse order
    ldmia   r9, {r2-r3}
    bl      __aeabi_cdcmple             @ r0<- Z set if eq, C clear if <
    @bleq   common_abort
    movcc   r1, #1                      @ (greater than) r1<- 1
    bcc     .LOP_CMPG_DOUBLE_finish
    mov     r1, #1                      @ r1<- 1 for NaN (cmpg biases high)
    b       .LOP_CMPG_DOUBLE_finish

/* continuation for OP_CMP_LONG */

.LOP_CMP_LONG_less:
    mvn     r1, #0                      @ r1<- -1
    @ Want to cond code the next mov so we can avoid branch, but don't see it;
    @ instead, we just replicate the tail end.
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LOP_CMP_LONG_greater:
    mov     r1, #1                      @ r1<- 1
    @ fall through to _finish

.LOP_CMP_LONG_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_AGET_WIDE */

.LOP_AGET_WIDE_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrd    r2, [r0, #offArrayObject_contents]  @ r2/r3<- vBB[vCC]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_APUT_WIDE */

.LOP_APUT_WIDE_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strd    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2/r3 (store)
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_APUT_OBJECT */
    /*
     * On entry:
     *  r1 = vBB (arrayObj)
     *  r9 = vAA (obj)
     *  r10 = offset into array (vBB + vCC * width)
     */
.LOP_APUT_OBJECT_finish:
    cmp     r9, #0                      @ storing null reference?
    beq     .LOP_APUT_OBJECT_skip_check @ yes, skip type checks
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    ldr     r1, [r1, #offObject_clazz]  @ r1<- arrayObj->clazz
    bl      dvmCanPutArrayElement       @ test object type vs. array type
    cmp     r0, #0                      @ okay?
    beq     common_errArrayStore        @ no
.LOP_APUT_OBJECT_skip_check:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_finish:
    @bl     common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_WIDE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_WIDE_finish:
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    .if 0                               @ volatile-access template variant (off)
    add     r0, r9, r3                  @ r0<- address of field
    bl      android_quasiatomic_read_64 @ r0/r1<- contents of field
    .else
    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
    .endif
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_OBJECT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_OBJECT_finish:
    @bl     common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_BOOLEAN */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_BOOLEAN_finish:
    @bl     common_squeak1
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_BYTE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_BYTE_finish:
    @bl     common_squeak2
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_CHAR */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_CHAR_finish:
    @bl     common_squeak3
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_SHORT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_SHORT_finish:
    @bl     common_squeak4
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_finish:
    @bl     common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_WIDE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_WIDE_finish:
    mov     r2, rINST, lsr #8           @ r2<- A+
    cmp     r9, #0                      @ check object for null
    and     r2, r2, #15                 @ r2<- A
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[A]
    GET_INST_OPCODE(r10)                @ extract opcode from rINST
                                        @ (into r10, not ip — NOTE(review):
                                        @ presumably so it survives the .if-0
                                        @ call path below; confirm)
    .if 0                               @ volatile-access template variant (off)
    add     r2, r9, r3                  @ r2<- target address
    bl      android_quasiatomic_swap_64 @ stores r0/r1 into addr r2
    .else
    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0/r1
    .endif
    GOTO_OPCODE(r10)                    @ jump to next instruction

/* continuation for OP_IPUT_OBJECT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_OBJECT_finish:
    @bl     common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_BOOLEAN */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_BOOLEAN_finish:
    @bl     common_squeak1
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_BYTE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_BYTE_finish:
    @bl     common_squeak2
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_CHAR */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_CHAR_finish:
    @bl     common_squeak3
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IPUT_SHORT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_SHORT_finish:
    @bl     common_squeak4
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_SGET */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_finish            @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SGET_WIDE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     *
     * Returns StaticField pointer in r0.
     */
.LOP_SGET_WIDE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_WIDE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SGET_OBJECT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_OBJECT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_OBJECT_finish     @ yes, finish
    b       common_exceptionThrown      @ no, handle exception

/* continuation for OP_SGET_BOOLEAN */

    /*
     * Continuation if the field has not yet been resolved.
8884 * r1: BBBB field ref 8885 */ 8886.LOP_SGET_BOOLEAN_resolve: 8887 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8888 EXPORT_PC() @ resolve() could throw, so export now 8889 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8890 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8891 cmp r0, #0 @ success? 8892 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 8893 b common_exceptionThrown @ no, handle exception 8894 8895/* continuation for OP_SGET_BYTE */ 8896 8897 /* 8898 * Continuation if the field has not yet been resolved. 8899 * r1: BBBB field ref 8900 */ 8901.LOP_SGET_BYTE_resolve: 8902 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8903 EXPORT_PC() @ resolve() could throw, so export now 8904 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8905 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8906 cmp r0, #0 @ success? 8907 bne .LOP_SGET_BYTE_finish @ yes, finish 8908 b common_exceptionThrown @ no, handle exception 8909 8910/* continuation for OP_SGET_CHAR */ 8911 8912 /* 8913 * Continuation if the field has not yet been resolved. 8914 * r1: BBBB field ref 8915 */ 8916.LOP_SGET_CHAR_resolve: 8917 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8918 EXPORT_PC() @ resolve() could throw, so export now 8919 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8920 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8921 cmp r0, #0 @ success? 8922 bne .LOP_SGET_CHAR_finish @ yes, finish 8923 b common_exceptionThrown @ no, handle exception 8924 8925/* continuation for OP_SGET_SHORT */ 8926 8927 /* 8928 * Continuation if the field has not yet been resolved. 8929 * r1: BBBB field ref 8930 */ 8931.LOP_SGET_SHORT_resolve: 8932 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8933 EXPORT_PC() @ resolve() could throw, so export now 8934 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8935 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8936 cmp r0, #0 @ success? 
8937 bne .LOP_SGET_SHORT_finish @ yes, finish 8938 b common_exceptionThrown @ no, handle exception 8939 8940/* continuation for OP_SPUT */ 8941 8942 /* 8943 * Continuation if the field has not yet been resolved. 8944 * r1: BBBB field ref 8945 */ 8946.LOP_SPUT_resolve: 8947 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8948 EXPORT_PC() @ resolve() could throw, so export now 8949 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8950 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8951 cmp r0, #0 @ success? 8952 bne .LOP_SPUT_finish @ yes, finish 8953 b common_exceptionThrown @ no, handle exception 8954 8955/* continuation for OP_SPUT_WIDE */ 8956 8957 /* 8958 * Continuation if the field has not yet been resolved. 8959 * r1: BBBB field ref 8960 * r9: &fp[AA] 8961 * 8962 * Returns StaticField pointer in r2. 8963 */ 8964.LOP_SPUT_WIDE_resolve: 8965 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8966 EXPORT_PC() @ resolve() could throw, so export now 8967 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8968 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8969 cmp r0, #0 @ success? 8970 mov r2, r0 @ copy to r2 8971 bne .LOP_SPUT_WIDE_finish @ yes, finish 8972 b common_exceptionThrown @ no, handle exception 8973 8974/* continuation for OP_SPUT_OBJECT */ 8975 8976 /* 8977 * Continuation if the field has not yet been resolved. 8978 * r1: BBBB field ref 8979 */ 8980.LOP_SPUT_OBJECT_resolve: 8981 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8982 EXPORT_PC() @ resolve() could throw, so export now 8983 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8984 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8985 cmp r0, #0 @ success? 8986 bne .LOP_SPUT_OBJECT_finish @ yes, finish 8987 b common_exceptionThrown @ no, handle exception 8988 8989/* continuation for OP_SPUT_BOOLEAN */ 8990 8991 /* 8992 * Continuation if the field has not yet been resolved. 
8993 * r1: BBBB field ref 8994 */ 8995.LOP_SPUT_BOOLEAN_resolve: 8996 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8997 EXPORT_PC() @ resolve() could throw, so export now 8998 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8999 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9000 cmp r0, #0 @ success? 9001 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 9002 b common_exceptionThrown @ no, handle exception 9003 9004/* continuation for OP_SPUT_BYTE */ 9005 9006 /* 9007 * Continuation if the field has not yet been resolved. 9008 * r1: BBBB field ref 9009 */ 9010.LOP_SPUT_BYTE_resolve: 9011 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9012 EXPORT_PC() @ resolve() could throw, so export now 9013 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9014 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9015 cmp r0, #0 @ success? 9016 bne .LOP_SPUT_BYTE_finish @ yes, finish 9017 b common_exceptionThrown @ no, handle exception 9018 9019/* continuation for OP_SPUT_CHAR */ 9020 9021 /* 9022 * Continuation if the field has not yet been resolved. 9023 * r1: BBBB field ref 9024 */ 9025.LOP_SPUT_CHAR_resolve: 9026 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9027 EXPORT_PC() @ resolve() could throw, so export now 9028 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9029 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9030 cmp r0, #0 @ success? 9031 bne .LOP_SPUT_CHAR_finish @ yes, finish 9032 b common_exceptionThrown @ no, handle exception 9033 9034/* continuation for OP_SPUT_SHORT */ 9035 9036 /* 9037 * Continuation if the field has not yet been resolved. 9038 * r1: BBBB field ref 9039 */ 9040.LOP_SPUT_SHORT_resolve: 9041 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9042 EXPORT_PC() @ resolve() could throw, so export now 9043 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9044 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9045 cmp r0, #0 @ success? 
9046 bne .LOP_SPUT_SHORT_finish @ yes, finish 9047 b common_exceptionThrown @ no, handle exception 9048 9049/* continuation for OP_INVOKE_VIRTUAL */ 9050 9051 /* 9052 * At this point: 9053 * r0 = resolved base method 9054 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9055 */ 9056.LOP_INVOKE_VIRTUAL_continue: 9057 GET_VREG(r1, r10) @ r1<- "this" ptr 9058 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9059 cmp r1, #0 @ is "this" null? 9060 beq common_errNullObject @ null "this", throw exception 9061 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9062 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9063 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9064 bl common_invokeMethodNoRange @ continue on 9065 9066/* continuation for OP_INVOKE_SUPER */ 9067 9068 /* 9069 * At this point: 9070 * r0 = resolved base method 9071 * r9 = method->clazz 9072 */ 9073.LOP_INVOKE_SUPER_continue: 9074 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9075 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9076 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9077 EXPORT_PC() @ must export for invoke 9078 cmp r2, r3 @ compare (methodIndex, vtableCount) 9079 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass 9080 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9081 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9082 bl common_invokeMethodNoRange @ continue on 9083 9084.LOP_INVOKE_SUPER_resolve: 9085 mov r0, r9 @ r0<- method->clazz 9086 mov r2, #METHOD_VIRTUAL @ resolver method type 9087 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9088 cmp r0, #0 @ got null? 9089 bne .LOP_INVOKE_SUPER_continue @ no, continue 9090 b common_exceptionThrown @ yes, handle exception 9091 9092 /* 9093 * Throw a NoSuchMethodError with the method name as the message. 
9094 * r0 = resolved base method 9095 */ 9096.LOP_INVOKE_SUPER_nsm: 9097 ldr r1, [r0, #offMethod_name] @ r1<- method name 9098 b common_errNoSuchMethod 9099 9100/* continuation for OP_INVOKE_DIRECT */ 9101 9102 /* 9103 * On entry: 9104 * r1 = reference (BBBB or CCCC) 9105 * r10 = "this" register 9106 */ 9107.LOP_INVOKE_DIRECT_resolve: 9108 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9109 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9110 mov r2, #METHOD_DIRECT @ resolver method type 9111 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9112 cmp r0, #0 @ got null? 9113 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9114 bne .LOP_INVOKE_DIRECT_finish @ no, continue 9115 b common_exceptionThrown @ yes, handle exception 9116 9117/* continuation for OP_INVOKE_VIRTUAL_RANGE */ 9118 9119 /* 9120 * At this point: 9121 * r0 = resolved base method 9122 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9123 */ 9124.LOP_INVOKE_VIRTUAL_RANGE_continue: 9125 GET_VREG(r1, r10) @ r1<- "this" ptr 9126 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9127 cmp r1, #0 @ is "this" null? 
9128 beq common_errNullObject @ null "this", throw exception 9129 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9130 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9131 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9132 bl common_invokeMethodRange @ continue on 9133 9134/* continuation for OP_INVOKE_SUPER_RANGE */ 9135 9136 /* 9137 * At this point: 9138 * r0 = resolved base method 9139 * r9 = method->clazz 9140 */ 9141.LOP_INVOKE_SUPER_RANGE_continue: 9142 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9143 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9144 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9145 EXPORT_PC() @ must export for invoke 9146 cmp r2, r3 @ compare (methodIndex, vtableCount) 9147 bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass 9148 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9149 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9150 bl common_invokeMethodRange @ continue on 9151 9152.LOP_INVOKE_SUPER_RANGE_resolve: 9153 mov r0, r9 @ r0<- method->clazz 9154 mov r2, #METHOD_VIRTUAL @ resolver method type 9155 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9156 cmp r0, #0 @ got null? 9157 bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue 9158 b common_exceptionThrown @ yes, handle exception 9159 9160 /* 9161 * Throw a NoSuchMethodError with the method name as the message. 
9162 * r0 = resolved base method 9163 */ 9164.LOP_INVOKE_SUPER_RANGE_nsm: 9165 ldr r1, [r0, #offMethod_name] @ r1<- method name 9166 b common_errNoSuchMethod 9167 9168/* continuation for OP_INVOKE_DIRECT_RANGE */ 9169 9170 /* 9171 * On entry: 9172 * r1 = reference (BBBB or CCCC) 9173 * r10 = "this" register 9174 */ 9175.LOP_INVOKE_DIRECT_RANGE_resolve: 9176 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9177 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9178 mov r2, #METHOD_DIRECT @ resolver method type 9179 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9180 cmp r0, #0 @ got null? 9181 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9182 bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue 9183 b common_exceptionThrown @ yes, handle exception 9184 9185/* continuation for OP_FLOAT_TO_LONG */ 9186/* 9187 * Convert the float in r0 to a long in r0/r1. 9188 * 9189 * We have to clip values to long min/max per the specification. The 9190 * expected common case is a "reasonable" value that converts directly 9191 * to modest integer. The EABI convert function isn't doing this for us. 9192 */ 9193f2l_doconv: 9194 stmfd sp!, {r4, lr} 9195 mov r1, #0x5f000000 @ (float)maxlong 9196 mov r4, r0 9197 bl __aeabi_fcmpge @ is arg >= maxlong? 9198 cmp r0, #0 @ nonzero == yes 9199 mvnne r0, #0 @ return maxlong (7fffffff) 9200 mvnne r1, #0x80000000 9201 ldmnefd sp!, {r4, pc} 9202 9203 mov r0, r4 @ recover arg 9204 mov r1, #0xdf000000 @ (float)minlong 9205 bl __aeabi_fcmple @ is arg <= minlong? 9206 cmp r0, #0 @ nonzero == yes 9207 movne r0, #0 @ return minlong (80000000) 9208 movne r1, #0x80000000 9209 ldmnefd sp!, {r4, pc} 9210 9211 mov r0, r4 @ recover arg 9212 mov r1, r4 9213 bl __aeabi_fcmpeq @ is arg == self? 
9214 cmp r0, #0 @ zero == no 9215 moveq r1, #0 @ return zero for NaN 9216 ldmeqfd sp!, {r4, pc} 9217 9218 mov r0, r4 @ recover arg 9219 bl __aeabi_f2lz @ convert float to long 9220 ldmfd sp!, {r4, pc} 9221 9222/* continuation for OP_DOUBLE_TO_LONG */ 9223/* 9224 * Convert the double in r0/r1 to a long in r0/r1. 9225 * 9226 * We have to clip values to long min/max per the specification. The 9227 * expected common case is a "reasonable" value that converts directly 9228 * to modest integer. The EABI convert function isn't doing this for us. 9229 */ 9230d2l_doconv: 9231 stmfd sp!, {r4, r5, lr} @ save regs 9232 mov r3, #0x43000000 @ maxlong, as a double (high word) 9233 add r3, #0x00e00000 @ 0x43e00000 9234 mov r2, #0 @ maxlong, as a double (low word) 9235 sub sp, sp, #4 @ align for EABI 9236 mov r4, r0 @ save a copy of r0 9237 mov r5, r1 @ and r1 9238 bl __aeabi_dcmpge @ is arg >= maxlong? 9239 cmp r0, #0 @ nonzero == yes 9240 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 9241 mvnne r1, #0x80000000 9242 bne 1f 9243 9244 mov r0, r4 @ recover arg 9245 mov r1, r5 9246 mov r3, #0xc3000000 @ minlong, as a double (high word) 9247 add r3, #0x00e00000 @ 0xc3e00000 9248 mov r2, #0 @ minlong, as a double (low word) 9249 bl __aeabi_dcmple @ is arg <= minlong? 9250 cmp r0, #0 @ nonzero == yes 9251 movne r0, #0 @ return minlong (8000000000000000) 9252 movne r1, #0x80000000 9253 bne 1f 9254 9255 mov r0, r4 @ recover arg 9256 mov r1, r5 9257 mov r2, r4 @ compare against self 9258 mov r3, r5 9259 bl __aeabi_dcmpeq @ is arg == self? 
9260 cmp r0, #0 @ zero == no 9261 moveq r1, #0 @ return zero for NaN 9262 beq 1f 9263 9264 mov r0, r4 @ recover arg 9265 mov r1, r5 9266 bl __aeabi_d2lz @ convert double to long 9267 92681: 9269 add sp, sp, #4 9270 ldmfd sp!, {r4, r5, pc} 9271 9272/* continuation for OP_MUL_LONG */ 9273 9274.LOP_MUL_LONG_finish: 9275 GET_INST_OPCODE(ip) @ extract opcode from rINST 9276 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 9277 GOTO_OPCODE(ip) @ jump to next instruction 9278 9279/* continuation for OP_SHL_LONG */ 9280 9281.LOP_SHL_LONG_finish: 9282 mov r0, r0, asl r2 @ r0<- r0 << r2 9283 GET_INST_OPCODE(ip) @ extract opcode from rINST 9284 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9285 GOTO_OPCODE(ip) @ jump to next instruction 9286 9287/* continuation for OP_SHR_LONG */ 9288 9289.LOP_SHR_LONG_finish: 9290 mov r1, r1, asr r2 @ r1<- r1 >> r2 9291 GET_INST_OPCODE(ip) @ extract opcode from rINST 9292 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9293 GOTO_OPCODE(ip) @ jump to next instruction 9294 9295/* continuation for OP_USHR_LONG */ 9296 9297.LOP_USHR_LONG_finish: 9298 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 9299 GET_INST_OPCODE(ip) @ extract opcode from rINST 9300 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9301 GOTO_OPCODE(ip) @ jump to next instruction 9302 9303/* continuation for OP_SHL_LONG_2ADDR */ 9304 9305.LOP_SHL_LONG_2ADDR_finish: 9306 GET_INST_OPCODE(ip) @ extract opcode from rINST 9307 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9308 GOTO_OPCODE(ip) @ jump to next instruction 9309 9310/* continuation for OP_SHR_LONG_2ADDR */ 9311 9312.LOP_SHR_LONG_2ADDR_finish: 9313 GET_INST_OPCODE(ip) @ extract opcode from rINST 9314 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9315 GOTO_OPCODE(ip) @ jump to next instruction 9316 9317/* continuation for OP_USHR_LONG_2ADDR */ 9318 9319.LOP_USHR_LONG_2ADDR_finish: 9320 GET_INST_OPCODE(ip) @ extract opcode from rINST 9321 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9322 GOTO_OPCODE(ip) @ jump to next instruction 9323 9324/* continuation for OP_IGET_WIDE_VOLATILE */ 9325 9326 
/* 9327 * Currently: 9328 * r0 holds resolved field 9329 * r9 holds object 9330 */ 9331.LOP_IGET_WIDE_VOLATILE_finish: 9332 cmp r9, #0 @ check object for null 9333 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9334 beq common_errNullObject @ object was null 9335 .if 1 9336 add r0, r9, r3 @ r0<- address of field 9337 bl android_quasiatomic_read_64 @ r0/r1<- contents of field 9338 .else 9339 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 9340 .endif 9341 mov r2, rINST, lsr #8 @ r2<- A+ 9342 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9343 and r2, r2, #15 @ r2<- A 9344 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 9345 GET_INST_OPCODE(ip) @ extract opcode from rINST 9346 stmia r3, {r0-r1} @ fp[A]<- r0/r1 9347 GOTO_OPCODE(ip) @ jump to next instruction 9348 9349/* continuation for OP_IPUT_WIDE_VOLATILE */ 9350 9351 /* 9352 * Currently: 9353 * r0 holds resolved field 9354 * r9 holds object 9355 */ 9356.LOP_IPUT_WIDE_VOLATILE_finish: 9357 mov r2, rINST, lsr #8 @ r2<- A+ 9358 cmp r9, #0 @ check object for null 9359 and r2, r2, #15 @ r2<- A 9360 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9361 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 9362 beq common_errNullObject @ object was null 9363 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9364 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 9365 GET_INST_OPCODE(r10) @ extract opcode from rINST 9366 .if 1 9367 add r2, r9, r3 @ r2<- target address 9368 bl android_quasiatomic_swap_64 @ stores r0/r1 into addr r2 9369 .else 9370 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1 9371 .endif 9372 GOTO_OPCODE(r10) @ jump to next instruction 9373 9374/* continuation for OP_SGET_WIDE_VOLATILE */ 9375 9376 /* 9377 * Continuation if the field has not yet been resolved. 9378 * r1: BBBB field ref 9379 * 9380 * Returns StaticField pointer in r0. 
9381 */ 9382.LOP_SGET_WIDE_VOLATILE_resolve: 9383 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9384 EXPORT_PC() @ resolve() could throw, so export now 9385 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9386 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9387 cmp r0, #0 @ success? 9388 bne .LOP_SGET_WIDE_VOLATILE_finish @ yes, finish 9389 b common_exceptionThrown @ no, handle exception 9390 9391/* continuation for OP_SPUT_WIDE_VOLATILE */ 9392 9393 /* 9394 * Continuation if the field has not yet been resolved. 9395 * r1: BBBB field ref 9396 * r9: &fp[AA] 9397 * 9398 * Returns StaticField pointer in r2. 9399 */ 9400.LOP_SPUT_WIDE_VOLATILE_resolve: 9401 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9402 EXPORT_PC() @ resolve() could throw, so export now 9403 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9404 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9405 cmp r0, #0 @ success? 9406 mov r2, r0 @ copy to r2 9407 bne .LOP_SPUT_WIDE_VOLATILE_finish @ yes, finish 9408 b common_exceptionThrown @ no, handle exception 9409 9410/* continuation for OP_EXECUTE_INLINE */ 9411 9412 /* 9413 * Extract args, call function. 9414 * r0 = #of args (0-4) 9415 * r10 = call index 9416 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9417 * 9418 * Other ideas: 9419 * - Use a jump table from the main piece to jump directly into the 9420 * AND/LDR pairs. Costs a data load, saves a branch. 9421 * - Have five separate pieces that do the loading, so we can work the 9422 * interleave a little better. Increases code size. 
9423 */ 9424.LOP_EXECUTE_INLINE_continue: 9425 rsb r0, r0, #4 @ r0<- 4-r0 9426 FETCH(r9, 2) @ r9<- FEDC 9427 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9428 bl common_abort @ (skipped due to ARM prefetch) 94294: and ip, r9, #0xf000 @ isolate F 9430 ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) 94313: and ip, r9, #0x0f00 @ isolate E 9432 ldr r2, [rFP, ip, lsr #6] @ r2<- vE 94332: and ip, r9, #0x00f0 @ isolate D 9434 ldr r1, [rFP, ip, lsr #2] @ r1<- vD 94351: and ip, r9, #0x000f @ isolate C 9436 ldr r0, [rFP, ip, lsl #2] @ r0<- vC 94370: 9438 ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation 9439 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9440 @ (not reached) 9441 9442.LOP_EXECUTE_INLINE_table: 9443 .word gDvmInlineOpsTable 9444 9445/* continuation for OP_EXECUTE_INLINE_RANGE */ 9446 9447 /* 9448 * Extract args, call function. 9449 * r0 = #of args (0-4) 9450 * r10 = call index 9451 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9452 */ 9453.LOP_EXECUTE_INLINE_RANGE_continue: 9454 rsb r0, r0, #4 @ r0<- 4-r0 9455 FETCH(r9, 2) @ r9<- CCCC 9456 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9457 bl common_abort @ (skipped due to ARM prefetch) 94584: add ip, r9, #3 @ base+3 9459 GET_VREG(r3, ip) @ r3<- vBase[3] 94603: add ip, r9, #2 @ base+2 9461 GET_VREG(r2, ip) @ r2<- vBase[2] 94622: add ip, r9, #1 @ base+1 9463 GET_VREG(r1, ip) @ r1<- vBase[1] 94641: add ip, r9, #0 @ (nop) 9465 GET_VREG(r0, ip) @ r0<- vBase[0] 94660: 9467 ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation 9468 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9469 @ (not reached) 9470 9471.LOP_EXECUTE_INLINE_RANGE_table: 9472 .word gDvmInlineOpsTable 9473 9474 .size dvmAsmSisterStart, .-dvmAsmSisterStart 9475 .global dvmAsmSisterEnd 9476dvmAsmSisterEnd: 9477 9478/* File: armv5te/footer.S */ 9479 9480/* 9481 * =========================================================================== 9482 * 
Common subroutines and data 9483 * =========================================================================== 9484 */ 9485 9486 9487 9488 .text 9489 .align 2 9490 9491#if defined(WITH_JIT) 9492#if defined(WITH_SELF_VERIFICATION) 9493 .global dvmJitToInterpPunt 9494dvmJitToInterpPunt: 9495 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9496 mov r2,#kSVSPunt @ r2<- interpreter entry point 9497 mov r3, #0 9498 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9499 b jitSVShadowRunEnd @ doesn't return 9500 9501 .global dvmJitToInterpSingleStep 9502dvmJitToInterpSingleStep: 9503 str lr,[rGLUE,#offGlue_jitResumeNPC] 9504 str r1,[rGLUE,#offGlue_jitResumeDPC] 9505 mov r2,#kSVSSingleStep @ r2<- interpreter entry point 9506 b jitSVShadowRunEnd @ doesn't return 9507 9508 .global dvmJitToInterpTraceSelectNoChain 9509dvmJitToInterpTraceSelectNoChain: 9510 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9511 mov r0,rPC @ pass our target PC 9512 mov r2,#kSVSTraceSelectNoChain @ r2<- interpreter entry point 9513 mov r3, #0 9514 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9515 b jitSVShadowRunEnd @ doesn't return 9516 9517 .global dvmJitToInterpTraceSelect 9518dvmJitToInterpTraceSelect: 9519 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9520 ldr r0,[lr, #-1] @ pass our target PC 9521 mov r2,#kSVSTraceSelect @ r2<- interpreter entry point 9522 mov r3, #0 9523 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9524 b jitSVShadowRunEnd @ doesn't return 9525 9526 .global dvmJitToInterpBackwardBranch 9527dvmJitToInterpBackwardBranch: 9528 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9529 ldr r0,[lr, #-1] @ pass our target PC 9530 mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point 9531 mov r3, #0 9532 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9533 b jitSVShadowRunEnd @ doesn't return 9534 9535 .global dvmJitToInterpNormal 
9536dvmJitToInterpNormal: 9537 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9538 ldr r0,[lr, #-1] @ pass our target PC 9539 mov r2,#kSVSNormal @ r2<- interpreter entry point 9540 mov r3, #0 9541 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9542 b jitSVShadowRunEnd @ doesn't return 9543 9544 .global dvmJitToInterpNoChain 9545dvmJitToInterpNoChain: 9546 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9547 mov r0,rPC @ pass our target PC 9548 mov r2,#kSVSNoChain @ r2<- interpreter entry point 9549 mov r3, #0 9550 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9551 b jitSVShadowRunEnd @ doesn't return 9552#else 9553/* 9554 * Return from the translation cache to the interpreter when the compiler is 9555 * having issues translating/executing a Dalvik instruction. We have to skip 9556 * the code cache lookup otherwise it is possible to indefinitely bouce 9557 * between the interpreter and the code cache if the instruction that fails 9558 * to be compiled happens to be at a trace start. 9559 */ 9560 .global dvmJitToInterpPunt 9561dvmJitToInterpPunt: 9562 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9563 mov rPC, r0 9564#if defined(WITH_JIT_TUNING) 9565 mov r0,lr 9566 bl dvmBumpPunt; 9567#endif 9568 EXPORT_PC() 9569 mov r0, #0 9570 str r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9571 adrl rIBASE, dvmAsmInstructionStart 9572 FETCH_INST() 9573 GET_INST_OPCODE(ip) 9574 GOTO_OPCODE(ip) 9575 9576/* 9577 * Return to the interpreter to handle a single instruction. 
9578 * On entry: 9579 * r0 <= PC 9580 * r1 <= PC of resume instruction 9581 * lr <= resume point in translation 9582 */ 9583 .global dvmJitToInterpSingleStep 9584dvmJitToInterpSingleStep: 9585 str lr,[rGLUE,#offGlue_jitResumeNPC] 9586 str r1,[rGLUE,#offGlue_jitResumeDPC] 9587 mov r1,#kInterpEntryInstr 9588 @ enum is 4 byte in aapcs-EABI 9589 str r1, [rGLUE, #offGlue_entryPoint] 9590 mov rPC,r0 9591 EXPORT_PC() 9592 9593 adrl rIBASE, dvmAsmInstructionStart 9594 mov r2,#kJitSingleStep @ Ask for single step and then revert 9595 str r2,[rGLUE,#offGlue_jitState] 9596 mov r1,#1 @ set changeInterp to bail to debug interp 9597 b common_gotoBail 9598 9599/* 9600 * Return from the translation cache and immediately request 9601 * a translation for the exit target. Commonly used for callees. 9602 */ 9603 .global dvmJitToInterpTraceSelectNoChain 9604dvmJitToInterpTraceSelectNoChain: 9605#if defined(WITH_JIT_TUNING) 9606 bl dvmBumpNoChain 9607#endif 9608 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9609 mov r0,rPC 9610 bl dvmJitGetCodeAddr @ Is there a translation? 9611 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9612 mov r1, rPC @ arg1 of translation may need this 9613 mov lr, #0 @ in case target is HANDLER_INTERPRET 9614 cmp r0,#0 9615 bxne r0 @ continue native execution if so 9616 b 2f 9617 9618/* 9619 * Return from the translation cache and immediately request 9620 * a translation for the exit target. Commonly used following 9621 * invokes. 9622 */ 9623 .global dvmJitToInterpTraceSelect 9624dvmJitToInterpTraceSelect: 9625 ldr rPC,[lr, #-1] @ get our target PC 9626 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9627 add rINST,lr,#-5 @ save start of chain branch 9628 add rINST, #-4 @ .. which is 9 bytes back 9629 mov r0,rPC 9630 bl dvmJitGetCodeAddr @ Is there a translation? 
9631 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9632 cmp r0,#0 9633 beq 2f 9634 mov r1,rINST 9635 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9636 mov r1, rPC @ arg1 of translation may need this 9637 mov lr, #0 @ in case target is HANDLER_INTERPRET 9638 cmp r0,#0 @ successful chain? 9639 bxne r0 @ continue native execution 9640 b toInterpreter @ didn't chain - resume with interpreter 9641 9642/* No translation, so request one if profiling isn't disabled*/ 96432: 9644 adrl rIBASE, dvmAsmInstructionStart 9645 GET_JIT_PROF_TABLE(r0) 9646 FETCH_INST() 9647 cmp r0, #0 9648 movne r2,#kJitTSelectRequestHot @ ask for trace selection 9649 bne common_selectTrace 9650 GET_INST_OPCODE(ip) 9651 GOTO_OPCODE(ip) 9652 9653/* 9654 * Return from the translation cache to the interpreter. 9655 * The return was done with a BLX from thumb mode, and 9656 * the following 32-bit word contains the target rPC value. 9657 * Note that lr (r14) will have its low-order bit set to denote 9658 * its thumb-mode origin. 9659 * 9660 * We'll need to stash our lr origin away, recover the new 9661 * target and then check to see if there is a translation available 9662 * for our new target. If so, we do a translation chain and 9663 * go back to native execution. Otherwise, it's back to the 9664 * interpreter (after treating this entry as a potential 9665 * trace start). 9666 */ 9667 .global dvmJitToInterpNormal 9668dvmJitToInterpNormal: 9669 ldr rPC,[lr, #-1] @ get our target PC 9670 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9671 add rINST,lr,#-5 @ save start of chain branch 9672 add rINST,#-4 @ .. which is 9 bytes back 9673#if defined(WITH_JIT_TUNING) 9674 bl dvmBumpNormal 9675#endif 9676 mov r0,rPC 9677 bl dvmJitGetCodeAddr @ Is there a translation? 
9678 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9679 cmp r0,#0 9680 beq toInterpreter @ go if not, otherwise do chain 9681 mov r1,rINST 9682 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9683 mov r1, rPC @ arg1 of translation may need this 9684 mov lr, #0 @ in case target is HANDLER_INTERPRET 9685 cmp r0,#0 @ successful chain? 9686 bxne r0 @ continue native execution 9687 b toInterpreter @ didn't chain - resume with interpreter 9688 9689/* 9690 * Return from the translation cache to the interpreter to do method invocation. 9691 * Check if translation exists for the callee, but don't chain to it. 9692 */ 9693 .global dvmJitToInterpNoChain 9694dvmJitToInterpNoChain: 9695#if defined(WITH_JIT_TUNING) 9696 bl dvmBumpNoChain 9697#endif 9698 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9699 mov r0,rPC 9700 bl dvmJitGetCodeAddr @ Is there a translation? 9701 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9702 mov r1, rPC @ arg1 of translation may need this 9703 mov lr, #0 @ in case target is HANDLER_INTERPRET 9704 cmp r0,#0 9705 bxne r0 @ continue native execution if so 9706#endif 9707 9708/* 9709 * No translation, restore interpreter regs and start interpreting. 9710 * rGLUE & rFP were preserved in the translated code, and rPC has 9711 * already been restored by the time we get here. We'll need to set 9712 * up rIBASE & rINST, and load the address of the JitTable into r0. 9713 */ 9714toInterpreter: 9715 EXPORT_PC() 9716 adrl rIBASE, dvmAsmInstructionStart 9717 FETCH_INST() 9718 GET_JIT_PROF_TABLE(r0) 9719 @ NOTE: intended fallthrough 9720/* 9721 * Common code to update potential trace start counter, and initiate 9722 * a trace-build if appropriate. On entry, rPC should point to the 9723 * next instruction to execute, and rINST should be already loaded with 9724 * the next opcode word, and r0 holds a pointer to the jit profile 9725 * table (pJitProfTable). 
 */
common_testUpdateProfile:
    cmp     r0,#0                       @ profile table NULL => not profiling
    GET_INST_OPCODE(ip)
    GOTO_OPCODE_IFEQ(ip)                @ if not profiling, fallthrough otherwise */

common_updateProfile:
    @ Index the profile counter table by a hash of rPC and decrement
    @ the per-entry countdown; when it hits zero, consider trace selection.
    eor     r3,rPC,rPC,lsr #12          @ cheap, but fast hash function
    lsl     r3,r3,#(32 - JIT_PROF_SIZE_LOG_2) @ shift out excess bits
    ldrb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter
    GET_INST_OPCODE(ip)
    subs    r1,r1,#1                    @ decrement counter
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it
    GOTO_OPCODE_IFNE(ip)                @ if not threshold, fallthrough otherwise */

/*
 * Here, we switch to the debug interpreter to request
 * trace selection. First, though, check to see if there
 * is already a native translation in place (and, if so,
 * jump to it now).
 */
    GET_JIT_THRESHOLD(r1)
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0,#0                       @ translation found?
#if !defined(WITH_SELF_VERIFICATION)
    bxne    r0                          @ jump to the translation
    mov     r2,#kJitTSelectRequest      @ ask for trace selection
    @ fall-through to common_selectTrace
#else
    moveq   r2,#kJitTSelectRequest      @ ask for trace selection
    beq     common_selectTrace
    /*
     * At this point, we have a target translation. However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    mov     r10, r0                     @ save target
    bl      dvmCompilerGetInterpretTemplate
    cmp     r0, r10                     @ special case?
    bne     jitSVShadowRunStart         @ set up self verification shadow space
    @ interpret-only template: just resume interpreting here
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
    /* no return */
#endif

/*
 * On entry:
 * r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
 */
common_selectTrace:
    str     r2,[rGLUE,#offGlue_jitState] @ record why we are bailing
    mov     r2,#kInterpEntryInstr       @ normal entry reason
    str     r2,[rGLUE,#offGlue_entryPoint]
    mov     r1,#1                       @ set changeInterp
    b       common_gotoBail

#if defined(WITH_SELF_VERIFICATION)
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
 * On entry:
 *    rPC, rFP, rGLUE: the values that they should contain
 *    r10: the address of the target translation.
 */
jitSVShadowRunStart:
    mov     r0,rPC                      @ r0<- program counter
    mov     r1,rFP                      @ r1<- frame pointer
    mov     r2,rGLUE                    @ r2<- InterpState pointer
    mov     r3,r10                      @ r3<- target translation
    bl      dvmSelfVerificationSaveState @ save registers to shadow space
    ldr     rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
    add     rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
    bx      r10                         @ jump to the translation

/*
 * Restore PC, registers, and interpState to original values
 * before jumping back to the interpreter.
 */
jitSVShadowRunEnd:
    mov     r1,rFP                      @ pass ending fp
    bl      dvmSelfVerificationRestoreState @ restore pc and fp values
    ldr     rPC,[r0,#offShadowSpace_startPC] @ restore PC
    ldr     rFP,[r0,#offShadowSpace_fp]  @ restore FP
    ldr     rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
    ldr     r1,[r0,#offShadowSpace_svState] @ get self verification state
    cmp     r1,#0                       @ check for punt condition
    beq     1f
    @ verification mismatch: bail to the debug interpreter for re-check
    mov     r2,#kJitSelfVerification    @ ask for self verification
    str     r2,[rGLUE,#offGlue_jitState]
    mov     r2,#kInterpEntryInstr       @ normal entry reason
    str     r2,[rGLUE,#offGlue_entryPoint]
    mov     r1,#1                       @ set changeInterp
    b       common_gotoBail

1:                                      @ exit to interpreter without check
    EXPORT_PC()
    adrl    rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#endif

#endif  /* NOTE(review): closes a conditional (presumably WITH_JIT) opened before this chunk */

/*
 * Common code when a backward branch is taken.
 *
 * TODO: we could avoid a branch by just setting r0 and falling through
 * into the common_periodicChecks code, and having a test on r0 at the
 * end determine if we should return to the caller or update & branch to
 * the next instr.
 *
 * On entry:
 *    r9 is PC adjustment *in bytes*
 */
common_backwardBranch:
    mov     r0, #kInterpEntryInstr
    bl      common_periodicChecks       @ may side-exit; returns if nothing to do
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile        @ yes, count this branch target
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/*
 * Need to see if the thread needs to be suspended or debugger/profiler
 * activity has begun. If so, we suspend the thread or side-exit to
 * the debug interpreter as appropriate.
 *
 * The common case is no activity on any of these, so we want to figure
 * that out quickly. If something is up, we can then sort out what.
 *
 * We want to be fast if the VM was built without debugger or profiler
 * support, but we also need to recognize that the system is usually
 * shipped with both of these enabled.
 *
 * TODO: reduce this so we're just checking a single location.
 *
 * On entry:
 *    r0 is reentry type, e.g. kInterpEntryInstr (for debugger/profiling)
 *    r9 is trampoline PC adjustment *in bytes*
 */
common_periodicChecks:
    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount

#if defined(WITH_DEBUGGER)
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
#endif
#if defined(WITH_PROFILER)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
#endif

    ldr     ip, [r3]                    @ ip<- suspendCount (int)

    @ OR all "interesting" flags together so a single Z test covers them.
#if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
    orrne   ip, ip, r1                  @ ip<- suspendCount | debuggerActive
    orrs    ip, ip, r2                  @ ip<- suspend|debugger|profiler; set Z
#elif defined(WITH_DEBUGGER)
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
    orrsne  ip, ip, r1                  @ yes, ip<- suspend | debugger; set Z
    @ (if not enabled, Z was set by test for r1==0, which is what we want)
#elif defined (WITH_PROFILER)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
    orrs    ip, ip, r2                  @ ip<- suspendCount | activeProfilers
#else
    cmp     ip, #0                      @ not ORing anything in; set Z
#endif

    bxeq    lr                          @ all zero, return

    /*
     * One or more interesting events have happened. Figure out what.
     *
     * If debugging or profiling are compiled in, we need to disambiguate.
     *
     * r0 still holds the reentry type.
     */
#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
    ldr     ip, [r3]                    @ ip<- suspendCount (int)
    cmp     ip, #0                      @ want suspend?
    beq     1f                          @ no, must be debugger/profiler
#endif

    stmfd   sp!, {r0, lr}               @ preserve r0 and lr
#if defined(WITH_JIT)
    /*
     * Refresh the Jit's cached copy of profile table pointer. This pointer
     * doubles as the Jit's on/off switch.
     */
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r3, [r3]                    @ r3 <- pJitProfTable
    EXPORT_PC()                         @ need for precise GC
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
#else
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ need for precise GC
#endif
    bl      dvmCheckSuspendPending      @ do full check, suspend if necessary
    ldmfd   sp!, {r0, lr}               @ restore r0 and lr

#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)

    /*
     * Reload the debugger/profiler enable flags. We're checking to see
     * if either of these got set while we were suspended.
     *
     * We can't really avoid the #ifdefs here, because the fields don't
     * exist when the feature is disabled.
     */
#if defined(WITH_DEBUGGER)
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
#else
    mov     r1, #0
#endif
#if defined(WITH_PROFILER)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
#else
    mov     r2, #0
#endif

    orrs    r1, r1, r2                  @ either debugger or profiler active?
    beq     2f                          @ no, nothing left to do

1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
    add     rPC, rPC, r9                @ update rPC
    mov     r1, #1                      @ "want switch" = true
    b       common_gotoBail             @ side exit

#endif  /*WITH_DEBUGGER || WITH_PROFILER*/

2:
    bx      lr                          @ nothing to do, return


/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 *
 * State registers will be saved to the "glue" area before bailing.
 *
 * On entry:
 *    r1 is "bool changeInterp", indicating if we want to switch to the
 *     other interpreter or just bail all the way out
 */
common_gotoBail:
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r0, rGLUE                   @ r0<- glue ptr
    b       dvmMterpStdBail             @ call(glue, changeInterp)

    @add    r1, r1, #1                  @ using (boolean+1)
    @add    r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
    @bl     _longjmp                    @ does not return
    @bl     common_abort


/*
 * Common code for method invocation with range.
 *
 * On entry:
 *    r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodRange:
.LinvokeNewRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC

    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
    @ (very few methods have > 10 args; could unroll for common cases)
    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
1:  ldr     r1, [r3], #4                @ val = *fp++
    subs    r2, r2, #1                  @ count--
    str     r1, [r10], #4               @ *outs++ = val
    bne     1b                          @ ...while count != 0
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    b       .LinvokeArgsDone

/*
 * Common code for method invocation without range.
 *
 * On entry:
 *    r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    beq     .LinvokeArgsDone

    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
.LinvokeNonRange:
    @ Computed goto: jump into the case for "copy (count) args". Each
    @ numbered case below is exactly 4 instructions (16 bytes) -- padded
    @ with "mov r0, r0" nops -- to match the "lsl #4" scaling here.
    rsb     r2, r2, #5                  @ r2<- 5-r2
    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
5:  and     ip, rINST, #0x0f00          @ isolate A
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vA (shift right 8, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vA
4:  and     ip, r1, #0xf000             @ isolate G
    ldr     r2, [rFP, ip, lsr #10]      @ r2<- vG (shift right 12, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vG
3:  and     ip, r1, #0x0f00             @ isolate F
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vF
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vF
2:  and     ip, r1, #0x00f0             @ isolate E
    ldr     r2, [rFP, ip, lsr #2]       @ r2<- vE
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vE
1:  and     ip, r1, #0x000f             @ isolate D
    ldr     r2, [rFP, ip, lsl #2]       @ r2<- vD
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]   @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
@    bl      common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                      @ bottom < interpStackEnd?
    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
    blo     .LstackOverflow             @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0                      @ no JIT return address yet
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE             @ native method? take the JNI path
    bne     .LinvokeNative

    /*
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                    @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                     @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]  @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    mov     rFP, r1                     @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)       @ extract prefetched opcode from r9
    mov     rINST, r9                   @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    mov     rFP, r1                     @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)       @ extract prefetched opcode from r9
    mov     rINST, r9                   @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
    mov     r9, r3                      @ r9<- glue->self (preserve)

    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFp (points to args)
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type   dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

    @mov     lr, pc                      @ set return addr
    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    LDR_PC_LR "[r2, #offMethod_nativeFunc]"

#if defined(WITH_JIT)
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
    ldr     r1, [r9, #offThread_exception] @ check for exception
#if defined(WITH_JIT)
    ldr     r3, [r3]                    @ r3 <- gDvmJit.pProfTable
#endif
    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
#if defined(WITH_JIT)
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
#endif
    bne     common_exceptionThrown      @ no, handle exception

    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LstackOverflow:    @ r0=methodToCall
    mov     r1, r0                      @ r1<- methodToCall
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
    bl      dvmHandleStackOverflow
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
#endif


    /*
     * Common code for method invocation, calling through "glue code".
     *
     * TODO: now that we have range and non-range invoke handlers, this
     *       needs to be split into two. Maybe just create entry points
     *       that set r9 and jump here?
     *
     * On entry:
     *    r0 is "Method* methodToCall", the method we're trying to call
     *    r9 is "bool methodCallRange", indicating if this is a /range variant
     */
     .if    0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rGLUE                   @ A0<- glue
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
     .endif



/*
 * Common code for handling a return instruction.
 *
 * This does not return.
 */
common_returnFromMethod:
.LreturnNew:
    mov     r0, #kInterpEntryReturn
    mov     r9, #0                      @ no PC adjustment for the checks
    bl      common_periodicChecks

    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ is this a break frame?
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    mov     r1, #0                      @ "want switch" = false
    beq     common_gotoBail             @ break frame, bail out completely

    PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
    ldr     r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
#if defined(WITH_JIT)
    ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
    GET_JIT_PROF_TABLE(r0)
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    str     r10, [r3, #offThread_inJitCodeCache]  @ may return to JIT'ed land
    cmp     r10, #0                     @ caller is compiled code
    blxne   r10
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /*
     * Return handling, calls through "glue code".
     */
     .if    0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
     .endif


/*
 * Somebody has thrown an exception. Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
     .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow
    mov     r9, #0                      @ no PC adjustment for the checks
    bl      common_periodicChecks

    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10                     @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    mov     r3, #0                      @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false
    str     ip, [sp, #-4]!              @ *--sp = &fp
    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
    mov     r0, r10                     @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ save relPc result in rFP
    mov     r0, r10                     @ r0<- self
    mov     r1, r9                      @ r1<- exception
    bl      dvmCleanupStackOverflow     @ call(self)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack
    bmi     .LnotCaughtLocally

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]  @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]  @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1         @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, r10                     @ if yes: r0<- self
    movne   r1, r9                      @ if yes: r1<- exception
    blne    dvmCleanupStackOverflow     @ if yes: call(self)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
#endif
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    mov     r1, #0                      @ "want switch" = false
    b       common_gotoBail             @ bail out


    /*
     * Exception handling, calls through "glue code".
     */
     .if    0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
     .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Invalid array index.
 */
common_errArrayIndex:
    EXPORT_PC()
    ldr     r0, strArrayIndexException  @ r0<- exception class descriptor
    mov     r1, #0                      @ r1<- NULL (no message)
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invalid array value.
 */
common_errArrayStore:
    EXPORT_PC()
    ldr     r0, strArrayStoreException  @ r0<- exception class descriptor
    mov     r1, #0                      @ r1<- NULL (no message)
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Integer divide or mod by zero.
 */
common_errDivideByZero:
    EXPORT_PC()
    ldr     r0, strArithmeticException  @ r0<- exception class descriptor
    ldr     r1, strDivideByZero         @ r1<- "divide by zero" message
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Attempt to allocate an array with a negative size.
 */
common_errNegativeArraySize:
    EXPORT_PC()
    ldr     r0, strNegativeArraySizeException @ r0<- exception class descriptor
    mov     r1, #0                      @ r1<- NULL (no message)
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invocation of a non-existent method.
 */
common_errNoSuchMethod:
    EXPORT_PC()
    ldr     r0, strNoSuchMethodError    @ r0<- exception class descriptor
    mov     r1, #0                      @ r1<- NULL (no message)
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * We encountered a null object when we weren't expecting one. We
 * export the PC, throw a NullPointerException, and goto the exception
 * processing code.
 */
common_errNullObject:
    EXPORT_PC()
    ldr     r0, strNullPointerException @ r0<- exception class descriptor
    mov     r1, #0                      @ r1<- NULL (no message)
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * For debugging, cause an immediate fault. The source address will
 * be in lr (use a bl instruction to jump here).
 */
common_abort:
    ldr     pc, .LdeadFood              @ jump to a known-bad address
.LdeadFood:
    .word   0xdeadf00d

/*
 * Spit out a "we were here", preserving all registers. (The attempt
 * to save ip won't work, but we need to save an even number of
 * registers for EABI 64-bit stack alignment.)
 */
    .macro  SQUEAK num
common_squeak\num:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strSqueak               @ r0<- "<%d>" format string
    mov     r1, #\num
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endm

    @ instantiate common_squeak0 .. common_squeak5
    SQUEAK  0
    SQUEAK  1
    SQUEAK  2
    SQUEAK  3
    SQUEAK  4
    SQUEAK  5

/*
 * Spit out the number in r0, preserving registers.
 */
common_printNum:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0                      @ r1<- value to print
    ldr     r0, strSqueak               @ r0<- "<%d>" format string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print a newline, preserving registers.
 */
common_printNewline:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strNewline
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

    /*
     * Print the 32-bit quantity in r0 as a hex value, preserving registers.
     */
common_printHex:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0                      @ r1<- value to print
    ldr     r0, strPrintHex             @ r0<- "<0x%x>" format string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print the 64-bit quantity in r0-r1, preserving registers.
 */
common_printLong:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r3, r1                      @ r2/r3 hold the 64-bit printf arg
    mov     r2, r0
    ldr     r0, strPrintLong            @ r0<- "<%lld>" format string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print full method info. Pass the Method* in r0. Preserves regs.
 */
common_printMethod:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpPrintMethod
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Call a C helper function that dumps regs and possibly some
 * additional info. Requires the C function to be compiled in.
 */
    @ assembled out (debug helper); enable by changing ".if 0"
    .if     0
common_dumpRegs:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpDumpArmRegs
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endif

#if 0
/*
 * Experiment on VFP mode.
 *
 * uint32_t setFPSCR(uint32_t val, uint32_t mask)
 *
 * Updates the bits specified by "mask", setting them to the values in "val".
 */
setFPSCR:
    and     r0, r0, r1                  @ make sure no stray bits are set
    fmrx    r2, fpscr                   @ get VFP reg
    mvn     r1, r1                      @ bit-invert mask
    and     r2, r2, r1                  @ clear masked bits
    orr     r2, r2, r0                  @ set specified bits
    fmxr    fpscr, r2                   @ set VFP reg
    mov     r0, r2                      @ return new value
    bx      lr

    .align  2
    .global dvmConfigureFP
    .type   dvmConfigureFP, %function
dvmConfigureFP:
    stmfd   sp!, {ip, lr}
    /* 0x03000000 sets DN/FZ */
    /* 0x00009f00 clears the six exception enable flags */
    bl      common_squeak0
    mov     r0, #0x03000000             @ r0<- 0x03000000
    add     r1, r0, #0x9f00             @ r1<- 0x03009f00
    bl      setFPSCR
    ldmfd   sp!, {ip, pc}
#endif


/*
 * String references, must be close to the code that uses them.
 */
    @ Literal pool of string addresses; loaded with "ldr rN, strXxx" above,
    @ so these must stay within LDR's pc-relative range of their users.
    .align  2
strArithmeticException:
    .word   .LstrArithmeticException
strArrayIndexException:
    .word   .LstrArrayIndexException
strArrayStoreException:
    .word   .LstrArrayStoreException
strDivideByZero:
    .word   .LstrDivideByZero
strNegativeArraySizeException:
    .word   .LstrNegativeArraySizeException
strNoSuchMethodError:
    .word   .LstrNoSuchMethodError
strNullPointerException:
    .word   .LstrNullPointerException

strLogTag:
    .word   .LstrLogTag
strExceptionNotCaughtLocally:
    .word   .LstrExceptionNotCaughtLocally

strNewline:
    .word   .LstrNewline
strSqueak:
    .word   .LstrSqueak
strPrintHex:
    .word   .LstrPrintHex
strPrintLong:
    .word   .LstrPrintLong

/*
 * Zero-terminated ASCII string data.
 *
 * On ARM we have two choices: do like gcc does, and LDR from a .word
 * with the address, or use an ADR pseudo-op to get the address
 * directly. ADR saves 4 bytes and an indirection, but it's using a
 * PC-relative addressing mode and hence has a limited range, which
 * makes it not work well with mergeable string sections.
 */
    @ Mergeable, null-terminated string section (entity size 1).
    .section .rodata.str1.4,"aMS",%progbits,1

.LstrBadEntryPoint:
    .asciz  "Bad entry point %d\n"
.LstrArithmeticException:
    .asciz  "Ljava/lang/ArithmeticException;"
.LstrArrayIndexException:
    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
.LstrArrayStoreException:
    .asciz  "Ljava/lang/ArrayStoreException;"
.LstrClassCastException:
    .asciz  "Ljava/lang/ClassCastException;"
.LstrDivideByZero:
    .asciz  "divide by zero"
.LstrFilledNewArrayNotImpl:
    .asciz  "filled-new-array only implemented for objects and 'int'"
.LstrInternalError:
    .asciz  "Ljava/lang/InternalError;"
.LstrInstantiationError:
    .asciz  "Ljava/lang/InstantiationError;"
.LstrNegativeArraySizeException:
    .asciz  "Ljava/lang/NegativeArraySizeException;"
.LstrNoSuchMethodError:
    .asciz  "Ljava/lang/NoSuchMethodError;"
.LstrNullPointerException:
    .asciz  "Ljava/lang/NullPointerException;"

.LstrLogTag:
    .asciz  "mterp"
.LstrExceptionNotCaughtLocally:
    .asciz  "Exception %s from %s:%d not caught locally\n"

.LstrNewline:
    .asciz  "\n"
.LstrSqueak:
    .asciz  "<%d>"
.LstrPrintHex:
    .asciz  "<0x%x>"
.LstrPrintLong:
    .asciz  "<%lld>"