InterpAsm-armv5te-vfp.S revision 1da12167d913efde56ec3b40491524b051679f2c
/*
 * This file was generated automatically by gen-mterp.py for 'armv5te-vfp'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-q3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 */
#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
        ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh    rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]

/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

#if defined(WITH_JIT)
/*
 * Null definition for overhead measuring purposes
 */
#define GET_JIT_TABLE(_reg)     ldr     _reg,[rGLUE,#offGlue_pJitTable]
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
        add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"


/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm


/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.
From this it figures out how to restore 271 * the registers, including PC, for the previous stack frame. If gdb 272 * sees a non-function label, it stops scanning, so either we need to 273 * have nothing but assembler-local labels between the entry point and 274 * the break, or we need to fake it out. 275 * 276 * When this is defined, we add some stuff to make gdb less confused. 277 */ 278#define ASSIST_DEBUGGER 1 279 280 .text 281 .align 2 282 .global dvmMterpStdRun 283 .type dvmMterpStdRun, %function 284 285/* 286 * On entry: 287 * r0 MterpGlue* glue 288 * 289 * This function returns a boolean "changeInterp" value. The return comes 290 * via a call to dvmMterpStdBail(). 291 */ 292dvmMterpStdRun: 293#define MTERP_ENTRY1 \ 294 .save {r4-r10,fp,lr}; \ 295 stmfd sp!, {r4-r10,fp,lr} @ save 9 regs 296#define MTERP_ENTRY2 \ 297 .pad #4; \ 298 sub sp, sp, #4 @ align 64 299 300 .fnstart 301 MTERP_ENTRY1 302 MTERP_ENTRY2 303 304 /* save stack pointer, add magic word for debuggerd */ 305 str sp, [r0, #offGlue_bailPtr] @ save SP for eventual return 306 307 /* set up "named" registers, figure out entry point */ 308 mov rGLUE, r0 @ set rGLUE 309 ldrb r1, [r0, #offGlue_entryPoint] @ InterpEntry enum is char 310 LOAD_PC_FP_FROM_GLUE() @ load rPC and rFP from "glue" 311 adr rIBASE, dvmAsmInstructionStart @ set rIBASE 312 cmp r1, #kInterpEntryInstr @ usual case? 313 bne .Lnot_instr @ no, handle it 314 315#if defined(WITH_JIT) 316.Lno_singleStep: 317 /* Entry is always a possible trace start */ 318 GET_JIT_PROF_TABLE(r0) 319 FETCH_INST() 320 cmp r0,#0 321 bne common_updateProfile 322 GET_INST_OPCODE(ip) 323 GOTO_OPCODE(ip) 324#else 325 /* start executing the instruction at rPC */ 326 FETCH_INST() @ load rINST from rPC 327 GET_INST_OPCODE(ip) @ extract opcode from rINST 328 GOTO_OPCODE(ip) @ jump to next instruction 329#endif 330 331.Lnot_instr: 332 cmp r1, #kInterpEntryReturn @ were we returning from a method? 
333 beq common_returnFromMethod 334 335.Lnot_return: 336 cmp r1, #kInterpEntryThrow @ were we throwing an exception? 337 beq common_exceptionThrown 338 339#if defined(WITH_JIT) 340.Lnot_throw: 341 ldr r0,[rGLUE, #offGlue_jitResume] 342 ldr r2,[rGLUE, #offGlue_jitResumePC] 343 cmp r1, #kInterpEntryResume @ resuming after Jit single-step? 344 bne .Lbad_arg 345 cmp rPC,r2 346 bne .Lno_singleStep @ must have branched, don't resume 347 mov r1, #kInterpEntryInstr 348 strb r1, [rGLUE, #offGlue_entryPoint] 349 ldr rINST, .LdvmCompilerTemplate 350 bx r0 @ re-enter the translation 351.LdvmCompilerTemplate: 352 .word dvmCompilerTemplateStart 353#endif 354 355.Lbad_arg: 356 ldr r0, strBadEntryPoint 357 @ r1 holds value of entryPoint 358 bl printf 359 bl dvmAbort 360 .fnend 361 362 363 .global dvmMterpStdBail 364 .type dvmMterpStdBail, %function 365 366/* 367 * Restore the stack pointer and PC from the save point established on entry. 368 * This is essentially the same as a longjmp, but should be cheaper. The 369 * last instruction causes us to return to whoever called dvmMterpStdRun. 370 * 371 * We pushed some registers on the stack in dvmMterpStdRun, then saved 372 * SP and LR. Here we restore SP, restore the registers, and then restore 373 * LR to PC. 374 * 375 * On entry: 376 * r0 MterpGlue* glue 377 * r1 bool changeInterp 378 */ 379dvmMterpStdBail: 380 ldr sp, [r0, #offGlue_bailPtr] @ sp<- saved SP 381 mov r0, r1 @ return the changeInterp value 382 add sp, sp, #4 @ un-align 64 383 LDMFD_PC "r4-r10,fp" @ restore 9 regs and return 384 385 386/* 387 * String references. 
388 */ 389strBadEntryPoint: 390 .word .LstrBadEntryPoint 391 392 393 394 .global dvmAsmInstructionStart 395 .type dvmAsmInstructionStart, %function 396dvmAsmInstructionStart = .L_OP_NOP 397 .text 398 399/* ------------------------------ */ 400 .balign 64 401.L_OP_NOP: /* 0x00 */ 402/* File: armv5te/OP_NOP.S */ 403 FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST 404 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 405 GOTO_OPCODE(ip) @ execute it 406 407#ifdef ASSIST_DEBUGGER 408 /* insert fake function header to help gdb find the stack frame */ 409 .type dalvik_inst, %function 410dalvik_inst: 411 .fnstart 412 MTERP_ENTRY1 413 MTERP_ENTRY2 414 .fnend 415#endif 416 417 418/* ------------------------------ */ 419 .balign 64 420.L_OP_MOVE: /* 0x01 */ 421/* File: armv5te/OP_MOVE.S */ 422 /* for move, move-object, long-to-int */ 423 /* op vA, vB */ 424 mov r1, rINST, lsr #12 @ r1<- B from 15:12 425 mov r0, rINST, lsr #8 @ r0<- A from 11:8 426 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 427 GET_VREG(r2, r1) @ r2<- fp[B] 428 and r0, r0, #15 429 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 430 SET_VREG(r2, r0) @ fp[A]<- r2 431 GOTO_OPCODE(ip) @ execute next instruction 432 433 434/* ------------------------------ */ 435 .balign 64 436.L_OP_MOVE_FROM16: /* 0x02 */ 437/* File: armv5te/OP_MOVE_FROM16.S */ 438 /* for: move/from16, move-object/from16 */ 439 /* op vAA, vBBBB */ 440 FETCH(r1, 1) @ r1<- BBBB 441 mov r0, rINST, lsr #8 @ r0<- AA 442 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 443 GET_VREG(r2, r1) @ r2<- fp[BBBB] 444 GET_INST_OPCODE(ip) @ extract opcode from rINST 445 SET_VREG(r2, r0) @ fp[AA]<- r2 446 GOTO_OPCODE(ip) @ jump to next instruction 447 448 449/* ------------------------------ */ 450 .balign 64 451.L_OP_MOVE_16: /* 0x03 */ 452/* File: armv5te/OP_MOVE_16.S */ 453 /* for: move/16, move-object/16 */ 454 /* op vAAAA, vBBBB */ 455 FETCH(r1, 2) @ r1<- BBBB 456 FETCH(r0, 1) @ r0<- AAAA 457 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 458 
GET_VREG(r2, r1) @ r2<- fp[BBBB] 459 GET_INST_OPCODE(ip) @ extract opcode from rINST 460 SET_VREG(r2, r0) @ fp[AAAA]<- r2 461 GOTO_OPCODE(ip) @ jump to next instruction 462 463 464/* ------------------------------ */ 465 .balign 64 466.L_OP_MOVE_WIDE: /* 0x04 */ 467/* File: armv5te/OP_MOVE_WIDE.S */ 468 /* move-wide vA, vB */ 469 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 470 mov r2, rINST, lsr #8 @ r2<- A(+) 471 mov r3, rINST, lsr #12 @ r3<- B 472 and r2, r2, #15 473 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 474 add r2, rFP, r2, lsl #2 @ r2<- &fp[A] 475 ldmia r3, {r0-r1} @ r0/r1<- fp[B] 476 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 477 GET_INST_OPCODE(ip) @ extract opcode from rINST 478 stmia r2, {r0-r1} @ fp[A]<- r0/r1 479 GOTO_OPCODE(ip) @ jump to next instruction 480 481 482/* ------------------------------ */ 483 .balign 64 484.L_OP_MOVE_WIDE_FROM16: /* 0x05 */ 485/* File: armv5te/OP_MOVE_WIDE_FROM16.S */ 486 /* move-wide/from16 vAA, vBBBB */ 487 /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ 488 FETCH(r3, 1) @ r3<- BBBB 489 mov r2, rINST, lsr #8 @ r2<- AA 490 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 491 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 492 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 493 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 494 GET_INST_OPCODE(ip) @ extract opcode from rINST 495 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 496 GOTO_OPCODE(ip) @ jump to next instruction 497 498 499/* ------------------------------ */ 500 .balign 64 501.L_OP_MOVE_WIDE_16: /* 0x06 */ 502/* File: armv5te/OP_MOVE_WIDE_16.S */ 503 /* move-wide/16 vAAAA, vBBBB */ 504 /* NOTE: regs can overlap, e.g. 
"move v6,v7" or "move v7,v6" */ 505 FETCH(r3, 2) @ r3<- BBBB 506 FETCH(r2, 1) @ r2<- AAAA 507 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 508 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA] 509 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 510 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 511 GET_INST_OPCODE(ip) @ extract opcode from rINST 512 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1 513 GOTO_OPCODE(ip) @ jump to next instruction 514 515 516/* ------------------------------ */ 517 .balign 64 518.L_OP_MOVE_OBJECT: /* 0x07 */ 519/* File: armv5te/OP_MOVE_OBJECT.S */ 520/* File: armv5te/OP_MOVE.S */ 521 /* for move, move-object, long-to-int */ 522 /* op vA, vB */ 523 mov r1, rINST, lsr #12 @ r1<- B from 15:12 524 mov r0, rINST, lsr #8 @ r0<- A from 11:8 525 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 526 GET_VREG(r2, r1) @ r2<- fp[B] 527 and r0, r0, #15 528 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 529 SET_VREG(r2, r0) @ fp[A]<- r2 530 GOTO_OPCODE(ip) @ execute next instruction 531 532 533 534/* ------------------------------ */ 535 .balign 64 536.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */ 537/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */ 538/* File: armv5te/OP_MOVE_FROM16.S */ 539 /* for: move/from16, move-object/from16 */ 540 /* op vAA, vBBBB */ 541 FETCH(r1, 1) @ r1<- BBBB 542 mov r0, rINST, lsr #8 @ r0<- AA 543 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 544 GET_VREG(r2, r1) @ r2<- fp[BBBB] 545 GET_INST_OPCODE(ip) @ extract opcode from rINST 546 SET_VREG(r2, r0) @ fp[AA]<- r2 547 GOTO_OPCODE(ip) @ jump to next instruction 548 549 550 551/* ------------------------------ */ 552 .balign 64 553.L_OP_MOVE_OBJECT_16: /* 0x09 */ 554/* File: armv5te/OP_MOVE_OBJECT_16.S */ 555/* File: armv5te/OP_MOVE_16.S */ 556 /* for: move/16, move-object/16 */ 557 /* op vAAAA, vBBBB */ 558 FETCH(r1, 2) @ r1<- BBBB 559 FETCH(r0, 1) @ r0<- AAAA 560 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 561 GET_VREG(r2, r1) @ r2<- fp[BBBB] 562 GET_INST_OPCODE(ip) @ extract opcode from rINST 563 SET_VREG(r2, r0) @ 
fp[AAAA]<- r2 564 GOTO_OPCODE(ip) @ jump to next instruction 565 566 567 568/* ------------------------------ */ 569 .balign 64 570.L_OP_MOVE_RESULT: /* 0x0a */ 571/* File: armv5te/OP_MOVE_RESULT.S */ 572 /* for: move-result, move-result-object */ 573 /* op vAA */ 574 mov r2, rINST, lsr #8 @ r2<- AA 575 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 576 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 577 GET_INST_OPCODE(ip) @ extract opcode from rINST 578 SET_VREG(r0, r2) @ fp[AA]<- r0 579 GOTO_OPCODE(ip) @ jump to next instruction 580 581 582/* ------------------------------ */ 583 .balign 64 584.L_OP_MOVE_RESULT_WIDE: /* 0x0b */ 585/* File: armv5te/OP_MOVE_RESULT_WIDE.S */ 586 /* move-result-wide vAA */ 587 mov r2, rINST, lsr #8 @ r2<- AA 588 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 589 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 590 ldmia r3, {r0-r1} @ r0/r1<- retval.j 591 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 592 GET_INST_OPCODE(ip) @ extract opcode from rINST 593 stmia r2, {r0-r1} @ fp[AA]<- r0/r1 594 GOTO_OPCODE(ip) @ jump to next instruction 595 596 597/* ------------------------------ */ 598 .balign 64 599.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */ 600/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */ 601/* File: armv5te/OP_MOVE_RESULT.S */ 602 /* for: move-result, move-result-object */ 603 /* op vAA */ 604 mov r2, rINST, lsr #8 @ r2<- AA 605 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 606 ldr r0, [rGLUE, #offGlue_retval] @ r0<- glue->retval.i 607 GET_INST_OPCODE(ip) @ extract opcode from rINST 608 SET_VREG(r0, r2) @ fp[AA]<- r0 609 GOTO_OPCODE(ip) @ jump to next instruction 610 611 612 613/* ------------------------------ */ 614 .balign 64 615.L_OP_MOVE_EXCEPTION: /* 0x0d */ 616/* File: armv5te/OP_MOVE_EXCEPTION.S */ 617 /* move-exception vAA */ 618 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 619 mov r2, rINST, lsr #8 @ r2<- AA 620 ldr r3, [r0, #offThread_exception] @ r3<- dvmGetException bypass 621 mov r1, #0 @ r1<- 0 622 
FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 623 SET_VREG(r3, r2) @ fp[AA]<- exception obj 624 GET_INST_OPCODE(ip) @ extract opcode from rINST 625 str r1, [r0, #offThread_exception] @ dvmClearException bypass 626 GOTO_OPCODE(ip) @ jump to next instruction 627 628 629/* ------------------------------ */ 630 .balign 64 631.L_OP_RETURN_VOID: /* 0x0e */ 632/* File: armv5te/OP_RETURN_VOID.S */ 633 b common_returnFromMethod 634 635 636/* ------------------------------ */ 637 .balign 64 638.L_OP_RETURN: /* 0x0f */ 639/* File: armv5te/OP_RETURN.S */ 640 /* 641 * Return a 32-bit value. Copies the return value into the "glue" 642 * structure, then jumps to the return handler. 643 * 644 * for: return, return-object 645 */ 646 /* op vAA */ 647 mov r2, rINST, lsr #8 @ r2<- AA 648 GET_VREG(r0, r2) @ r0<- vAA 649 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 650 b common_returnFromMethod 651 652 653/* ------------------------------ */ 654 .balign 64 655.L_OP_RETURN_WIDE: /* 0x10 */ 656/* File: armv5te/OP_RETURN_WIDE.S */ 657 /* 658 * Return a 64-bit value. Copies the return value into the "glue" 659 * structure, then jumps to the return handler. 660 */ 661 /* return-wide vAA */ 662 mov r2, rINST, lsr #8 @ r2<- AA 663 add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] 664 add r3, rGLUE, #offGlue_retval @ r3<- &glue->retval 665 ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1 666 stmia r3, {r0-r1} @ retval<- r0/r1 667 b common_returnFromMethod 668 669 670/* ------------------------------ */ 671 .balign 64 672.L_OP_RETURN_OBJECT: /* 0x11 */ 673/* File: armv5te/OP_RETURN_OBJECT.S */ 674/* File: armv5te/OP_RETURN.S */ 675 /* 676 * Return a 32-bit value. Copies the return value into the "glue" 677 * structure, then jumps to the return handler. 
678 * 679 * for: return, return-object 680 */ 681 /* op vAA */ 682 mov r2, rINST, lsr #8 @ r2<- AA 683 GET_VREG(r0, r2) @ r0<- vAA 684 str r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA 685 b common_returnFromMethod 686 687 688 689/* ------------------------------ */ 690 .balign 64 691.L_OP_CONST_4: /* 0x12 */ 692/* File: armv5te/OP_CONST_4.S */ 693 /* const/4 vA, #+B */ 694 mov r1, rINST, lsl #16 @ r1<- Bxxx0000 695 mov r0, rINST, lsr #8 @ r0<- A+ 696 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 697 mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended) 698 and r0, r0, #15 699 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 700 SET_VREG(r1, r0) @ fp[A]<- r1 701 GOTO_OPCODE(ip) @ execute next instruction 702 703 704/* ------------------------------ */ 705 .balign 64 706.L_OP_CONST_16: /* 0x13 */ 707/* File: armv5te/OP_CONST_16.S */ 708 /* const/16 vAA, #+BBBB */ 709 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 710 mov r3, rINST, lsr #8 @ r3<- AA 711 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 712 SET_VREG(r0, r3) @ vAA<- r0 713 GET_INST_OPCODE(ip) @ extract opcode from rINST 714 GOTO_OPCODE(ip) @ jump to next instruction 715 716 717/* ------------------------------ */ 718 .balign 64 719.L_OP_CONST: /* 0x14 */ 720/* File: armv5te/OP_CONST.S */ 721 /* const vAA, #+BBBBbbbb */ 722 mov r3, rINST, lsr #8 @ r3<- AA 723 FETCH(r0, 1) @ r0<- bbbb (low) 724 FETCH(r1, 2) @ r1<- BBBB (high) 725 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 726 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 727 GET_INST_OPCODE(ip) @ extract opcode from rINST 728 SET_VREG(r0, r3) @ vAA<- r0 729 GOTO_OPCODE(ip) @ jump to next instruction 730 731 732/* ------------------------------ */ 733 .balign 64 734.L_OP_CONST_HIGH16: /* 0x15 */ 735/* File: armv5te/OP_CONST_HIGH16.S */ 736 /* const/high16 vAA, #+BBBB0000 */ 737 FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended) 738 mov r3, rINST, lsr #8 @ r3<- AA 739 mov r0, r0, lsl #16 @ r0<- BBBB0000 740 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 741 
SET_VREG(r0, r3) @ vAA<- r0 742 GET_INST_OPCODE(ip) @ extract opcode from rINST 743 GOTO_OPCODE(ip) @ jump to next instruction 744 745 746/* ------------------------------ */ 747 .balign 64 748.L_OP_CONST_WIDE_16: /* 0x16 */ 749/* File: armv5te/OP_CONST_WIDE_16.S */ 750 /* const-wide/16 vAA, #+BBBB */ 751 FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended) 752 mov r3, rINST, lsr #8 @ r3<- AA 753 mov r1, r0, asr #31 @ r1<- ssssssss 754 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 755 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 756 GET_INST_OPCODE(ip) @ extract opcode from rINST 757 stmia r3, {r0-r1} @ vAA<- r0/r1 758 GOTO_OPCODE(ip) @ jump to next instruction 759 760 761/* ------------------------------ */ 762 .balign 64 763.L_OP_CONST_WIDE_32: /* 0x17 */ 764/* File: armv5te/OP_CONST_WIDE_32.S */ 765 /* const-wide/32 vAA, #+BBBBbbbb */ 766 FETCH(r0, 1) @ r0<- 0000bbbb (low) 767 mov r3, rINST, lsr #8 @ r3<- AA 768 FETCH_S(r2, 2) @ r2<- ssssBBBB (high) 769 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 770 orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb 771 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 772 mov r1, r0, asr #31 @ r1<- ssssssss 773 GET_INST_OPCODE(ip) @ extract opcode from rINST 774 stmia r3, {r0-r1} @ vAA<- r0/r1 775 GOTO_OPCODE(ip) @ jump to next instruction 776 777 778/* ------------------------------ */ 779 .balign 64 780.L_OP_CONST_WIDE: /* 0x18 */ 781/* File: armv5te/OP_CONST_WIDE.S */ 782 /* const-wide vAA, #+HHHHhhhhBBBBbbbb */ 783 FETCH(r0, 1) @ r0<- bbbb (low) 784 FETCH(r1, 2) @ r1<- BBBB (low middle) 785 FETCH(r2, 3) @ r2<- hhhh (high middle) 786 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word) 787 FETCH(r3, 4) @ r3<- HHHH (high) 788 mov r9, rINST, lsr #8 @ r9<- AA 789 orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word) 790 FETCH_ADVANCE_INST(5) @ advance rPC, load rINST 791 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 792 GET_INST_OPCODE(ip) @ extract opcode from rINST 793 stmia r9, {r0-r1} @ vAA<- r0/r1 794 GOTO_OPCODE(ip) @ jump to next instruction 795 796 797/* 
------------------------------ */ 798 .balign 64 799.L_OP_CONST_WIDE_HIGH16: /* 0x19 */ 800/* File: armv5te/OP_CONST_WIDE_HIGH16.S */ 801 /* const-wide/high16 vAA, #+BBBB000000000000 */ 802 FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended) 803 mov r3, rINST, lsr #8 @ r3<- AA 804 mov r0, #0 @ r0<- 00000000 805 mov r1, r1, lsl #16 @ r1<- BBBB0000 806 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 807 add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] 808 GET_INST_OPCODE(ip) @ extract opcode from rINST 809 stmia r3, {r0-r1} @ vAA<- r0/r1 810 GOTO_OPCODE(ip) @ jump to next instruction 811 812 813/* ------------------------------ */ 814 .balign 64 815.L_OP_CONST_STRING: /* 0x1a */ 816/* File: armv5te/OP_CONST_STRING.S */ 817 /* const/string vAA, String@BBBB */ 818 FETCH(r1, 1) @ r1<- BBBB 819 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 820 mov r9, rINST, lsr #8 @ r9<- AA 821 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 822 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 823 cmp r0, #0 @ not yet resolved? 
824 beq .LOP_CONST_STRING_resolve 825 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 826 GET_INST_OPCODE(ip) @ extract opcode from rINST 827 SET_VREG(r0, r9) @ vAA<- r0 828 GOTO_OPCODE(ip) @ jump to next instruction 829 830/* ------------------------------ */ 831 .balign 64 832.L_OP_CONST_STRING_JUMBO: /* 0x1b */ 833/* File: armv5te/OP_CONST_STRING_JUMBO.S */ 834 /* const/string vAA, String@BBBBBBBB */ 835 FETCH(r0, 1) @ r0<- bbbb (low) 836 FETCH(r1, 2) @ r1<- BBBB (high) 837 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 838 mov r9, rINST, lsr #8 @ r9<- AA 839 ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings 840 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 841 ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB] 842 cmp r0, #0 843 beq .LOP_CONST_STRING_JUMBO_resolve 844 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 845 GET_INST_OPCODE(ip) @ extract opcode from rINST 846 SET_VREG(r0, r9) @ vAA<- r0 847 GOTO_OPCODE(ip) @ jump to next instruction 848 849/* ------------------------------ */ 850 .balign 64 851.L_OP_CONST_CLASS: /* 0x1c */ 852/* File: armv5te/OP_CONST_CLASS.S */ 853 /* const/class vAA, Class@BBBB */ 854 FETCH(r1, 1) @ r1<- BBBB 855 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- glue->methodClassDex 856 mov r9, rINST, lsr #8 @ r9<- AA 857 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses 858 ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB] 859 cmp r0, #0 @ not yet resolved? 860 beq .LOP_CONST_CLASS_resolve 861 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 862 GET_INST_OPCODE(ip) @ extract opcode from rINST 863 SET_VREG(r0, r9) @ vAA<- r0 864 GOTO_OPCODE(ip) @ jump to next instruction 865 866/* ------------------------------ */ 867 .balign 64 868.L_OP_MONITOR_ENTER: /* 0x1d */ 869/* File: armv5te/OP_MONITOR_ENTER.S */ 870 /* 871 * Synchronize on an object. 
872 */ 873 /* monitor-enter vAA */ 874 mov r2, rINST, lsr #8 @ r2<- AA 875 GET_VREG(r1, r2) @ r1<- vAA (object) 876 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 877 cmp r1, #0 @ null object? 878 EXPORT_PC() @ need for precise GC, MONITOR_TRACKING 879 beq common_errNullObject @ null object, throw an exception 880 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 881 bl dvmLockObject @ call(self, obj) 882#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */ 883 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 884 ldr r1, [r0, #offThread_exception] @ check for exception 885 cmp r1, #0 886 bne common_exceptionThrown @ exception raised, bail out 887#endif 888 GET_INST_OPCODE(ip) @ extract opcode from rINST 889 GOTO_OPCODE(ip) @ jump to next instruction 890 891 892/* ------------------------------ */ 893 .balign 64 894.L_OP_MONITOR_EXIT: /* 0x1e */ 895/* File: armv5te/OP_MONITOR_EXIT.S */ 896 /* 897 * Unlock an object. 898 * 899 * Exceptions that occur when unlocking a monitor need to appear as 900 * if they happened at the following instruction. See the Dalvik 901 * instruction spec. 902 */ 903 /* monitor-exit vAA */ 904 mov r2, rINST, lsr #8 @ r2<- AA 905 EXPORT_PC() @ before fetch: export the PC 906 GET_VREG(r1, r2) @ r1<- vAA (object) 907 cmp r1, #0 @ null object? 908 beq common_errNullObject @ yes 909 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 910 bl dvmUnlockObject @ r0<- success for unlock(self, obj) 911 cmp r0, #0 @ failed? 912 beq common_exceptionThrown @ yes, exception is pending 913 FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST 914 GET_INST_OPCODE(ip) @ extract opcode from rINST 915 GOTO_OPCODE(ip) @ jump to next instruction 916 917 918/* ------------------------------ */ 919 .balign 64 920.L_OP_CHECK_CAST: /* 0x1f */ 921/* File: armv5te/OP_CHECK_CAST.S */ 922 /* 923 * Check to see if a cast from one class to another is allowed. 
924 */ 925 /* check-cast vAA, class@BBBB */ 926 mov r3, rINST, lsr #8 @ r3<- AA 927 FETCH(r2, 1) @ r2<- BBBB 928 GET_VREG(r9, r3) @ r9<- object 929 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- pDvmDex 930 cmp r9, #0 @ is object null? 931 ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses 932 beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds 933 ldr r1, [r0, r2, lsl #2] @ r1<- resolved class 934 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 935 cmp r1, #0 @ have we resolved this before? 936 beq .LOP_CHECK_CAST_resolve @ not resolved, do it now 937.LOP_CHECK_CAST_resolved: 938 cmp r0, r1 @ same class (trivial success)? 939 bne .LOP_CHECK_CAST_fullcheck @ no, do full check 940.LOP_CHECK_CAST_okay: 941 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 942 GET_INST_OPCODE(ip) @ extract opcode from rINST 943 GOTO_OPCODE(ip) @ jump to next instruction 944 945/* ------------------------------ */ 946 .balign 64 947.L_OP_INSTANCE_OF: /* 0x20 */ 948/* File: armv5te/OP_INSTANCE_OF.S */ 949 /* 950 * Check to see if an object reference is an instance of a class. 951 * 952 * Most common situation is a non-null object, being compared against 953 * an already-resolved class. 954 */ 955 /* instance-of vA, vB, class@CCCC */ 956 mov r3, rINST, lsr #12 @ r3<- B 957 mov r9, rINST, lsr #8 @ r9<- A+ 958 GET_VREG(r0, r3) @ r0<- vB (object) 959 and r9, r9, #15 @ r9<- A 960 cmp r0, #0 @ is object null? 961 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- pDvmDex 962 beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0 963 FETCH(r3, 1) @ r3<- CCCC 964 ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses 965 ldr r1, [r2, r3, lsl #2] @ r1<- resolved class 966 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 967 cmp r1, #0 @ have we resolved this before? 968 beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now 969.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class 970 cmp r0, r1 @ same class (trivial success)? 
971 beq .LOP_INSTANCE_OF_trivial @ yes, trivial finish 972 b .LOP_INSTANCE_OF_fullcheck @ no, do full check 973 974/* ------------------------------ */ 975 .balign 64 976.L_OP_ARRAY_LENGTH: /* 0x21 */ 977/* File: armv5te/OP_ARRAY_LENGTH.S */ 978 /* 979 * Return the length of an array. 980 */ 981 mov r1, rINST, lsr #12 @ r1<- B 982 mov r2, rINST, lsr #8 @ r2<- A+ 983 GET_VREG(r0, r1) @ r0<- vB (object ref) 984 and r2, r2, #15 @ r2<- A 985 cmp r0, #0 @ is object null? 986 beq common_errNullObject @ yup, fail 987 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 988 ldr r3, [r0, #offArrayObject_length] @ r3<- array length 989 GET_INST_OPCODE(ip) @ extract opcode from rINST 990 SET_VREG(r3, r2) @ vB<- length 991 GOTO_OPCODE(ip) @ jump to next instruction 992 993 994/* ------------------------------ */ 995 .balign 64 996.L_OP_NEW_INSTANCE: /* 0x22 */ 997/* File: armv5te/OP_NEW_INSTANCE.S */ 998 /* 999 * Create a new instance of a class. 1000 */ 1001 /* new-instance vAA, class@BBBB */ 1002 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1003 FETCH(r1, 1) @ r1<- BBBB 1004 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1005 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1006 EXPORT_PC() @ req'd for init, resolve, alloc 1007 cmp r0, #0 @ already resolved? 1008 beq .LOP_NEW_INSTANCE_resolve @ no, resolve it now 1009.LOP_NEW_INSTANCE_resolved: @ r0=class 1010 ldrb r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum 1011 cmp r1, #CLASS_INITIALIZED @ has class been initialized? 1012 bne .LOP_NEW_INSTANCE_needinit @ no, init class now 1013.LOP_NEW_INSTANCE_initialized: @ r0=class 1014 mov r1, #ALLOC_DONT_TRACK @ flags for alloc call 1015 bl dvmAllocObject @ r0<- new object 1016 b .LOP_NEW_INSTANCE_finish @ continue 1017 1018/* ------------------------------ */ 1019 .balign 64 1020.L_OP_NEW_ARRAY: /* 0x23 */ 1021/* File: armv5te/OP_NEW_ARRAY.S */ 1022 /* 1023 * Allocate an array of objects, specified with the array class 1024 * and a count. 
1025 * 1026 * The verifier guarantees that this is an array class, so we don't 1027 * check for it here. 1028 */ 1029 /* new-array vA, vB, class@CCCC */ 1030 mov r0, rINST, lsr #12 @ r0<- B 1031 FETCH(r2, 1) @ r2<- CCCC 1032 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1033 GET_VREG(r1, r0) @ r1<- vB (array length) 1034 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1035 cmp r1, #0 @ check length 1036 ldr r0, [r3, r2, lsl #2] @ r0<- resolved class 1037 bmi common_errNegativeArraySize @ negative length, bail 1038 cmp r0, #0 @ already resolved? 1039 EXPORT_PC() @ req'd for resolve, alloc 1040 bne .LOP_NEW_ARRAY_finish @ resolved, continue 1041 b .LOP_NEW_ARRAY_resolve @ do resolve now 1042 1043/* ------------------------------ */ 1044 .balign 64 1045.L_OP_FILLED_NEW_ARRAY: /* 0x24 */ 1046/* File: armv5te/OP_FILLED_NEW_ARRAY.S */ 1047 /* 1048 * Create a new array with elements filled from registers. 1049 * 1050 * for: filled-new-array, filled-new-array/range 1051 */ 1052 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 1053 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */ 1054 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1055 FETCH(r1, 1) @ r1<- BBBB 1056 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1057 EXPORT_PC() @ need for resolve and alloc 1058 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1059 mov r10, rINST, lsr #8 @ r10<- AA or BA 1060 cmp r0, #0 @ already resolved? 1061 bne .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on 10628: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 1063 mov r2, #0 @ r2<- false 1064 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 1065 bl dvmResolveClass @ r0<- call(clazz, ref) 1066 cmp r0, #0 @ got null? 
1067 beq common_exceptionThrown @ yes, handle exception 1068 b .LOP_FILLED_NEW_ARRAY_continue 1069 1070/* ------------------------------ */ 1071 .balign 64 1072.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */ 1073/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */ 1074/* File: armv5te/OP_FILLED_NEW_ARRAY.S */ 1075 /* 1076 * Create a new array with elements filled from registers. 1077 * 1078 * for: filled-new-array, filled-new-array/range 1079 */ 1080 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 1081 /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */ 1082 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 1083 FETCH(r1, 1) @ r1<- BBBB 1084 ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses 1085 EXPORT_PC() @ need for resolve and alloc 1086 ldr r0, [r3, r1, lsl #2] @ r0<- resolved class 1087 mov r10, rINST, lsr #8 @ r10<- AA or BA 1088 cmp r0, #0 @ already resolved? 1089 bne .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on 10908: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 1091 mov r2, #0 @ r2<- false 1092 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 1093 bl dvmResolveClass @ r0<- call(clazz, ref) 1094 cmp r0, #0 @ got null? 1095 beq common_exceptionThrown @ yes, handle exception 1096 b .LOP_FILLED_NEW_ARRAY_RANGE_continue 1097 1098 1099/* ------------------------------ */ 1100 .balign 64 1101.L_OP_FILL_ARRAY_DATA: /* 0x26 */ 1102/* File: armv5te/OP_FILL_ARRAY_DATA.S */ 1103 /* fill-array-data vAA, +BBBBBBBB */ 1104 FETCH(r0, 1) @ r0<- bbbb (lo) 1105 FETCH(r1, 2) @ r1<- BBBB (hi) 1106 mov r3, rINST, lsr #8 @ r3<- AA 1107 orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb 1108 GET_VREG(r0, r3) @ r0<- vAA (array object) 1109 add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.) 
1110 EXPORT_PC(); 1111 bl dvmInterpHandleFillArrayData@ fill the array with predefined data 1112 cmp r0, #0 @ 0 means an exception is thrown 1113 beq common_exceptionThrown @ has exception 1114 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 1115 GET_INST_OPCODE(ip) @ extract opcode from rINST 1116 GOTO_OPCODE(ip) @ jump to next instruction 1117 1118/* ------------------------------ */ 1119 .balign 64 1120.L_OP_THROW: /* 0x27 */ 1121/* File: armv5te/OP_THROW.S */ 1122 /* 1123 * Throw an exception object in the current thread. 1124 */ 1125 /* throw vAA */ 1126 mov r2, rINST, lsr #8 @ r2<- AA 1127 GET_VREG(r1, r2) @ r1<- vAA (exception object) 1128 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 1129 cmp r1, #0 @ null object? 1130 beq common_errNullObject @ yes, throw an NPE instead 1131 @ bypass dvmSetException, just store it 1132 str r1, [r0, #offThread_exception] @ thread->exception<- obj 1133 b common_exceptionThrown 1134 1135 1136/* ------------------------------ */ 1137 .balign 64 1138.L_OP_GOTO: /* 0x28 */ 1139/* File: armv5te/OP_GOTO.S */ 1140 /* 1141 * Unconditional branch, 8-bit offset. 1142 * 1143 * The branch distance is a signed code-unit offset, which we need to 1144 * double to get a byte offset. 
1145 */ 1146 /* goto +AA */ 1147 mov r0, rINST, lsl #16 @ r0<- AAxx0000 1148 movs r9, r0, asr #24 @ r9<- ssssssAA (sign-extended) 1149 mov r9, r9, lsl #1 @ r9<- byte offset 1150 bmi common_backwardBranch @ backward branch, do periodic checks 1151#if defined(WITH_JIT) 1152 GET_JIT_PROF_TABLE(r0) 1153 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1154 cmp r0,#0 1155 bne common_updateProfile 1156 GET_INST_OPCODE(ip) @ extract opcode from rINST 1157 GOTO_OPCODE(ip) @ jump to next instruction 1158#else 1159 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1160 GET_INST_OPCODE(ip) @ extract opcode from rINST 1161 GOTO_OPCODE(ip) @ jump to next instruction 1162#endif 1163 1164/* ------------------------------ */ 1165 .balign 64 1166.L_OP_GOTO_16: /* 0x29 */ 1167/* File: armv5te/OP_GOTO_16.S */ 1168 /* 1169 * Unconditional branch, 16-bit offset. 1170 * 1171 * The branch distance is a signed code-unit offset, which we need to 1172 * double to get a byte offset. 1173 */ 1174 /* goto/16 +AAAA */ 1175 FETCH_S(r0, 1) @ r0<- ssssAAAA (sign-extended) 1176 movs r9, r0, asl #1 @ r9<- byte offset, check sign 1177 bmi common_backwardBranch @ backward branch, do periodic checks 1178#if defined(WITH_JIT) 1179 GET_JIT_PROF_TABLE(r0) 1180 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1181 cmp r0,#0 1182 bne common_updateProfile 1183 GET_INST_OPCODE(ip) @ extract opcode from rINST 1184 GOTO_OPCODE(ip) @ jump to next instruction 1185#else 1186 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1187 GET_INST_OPCODE(ip) @ extract opcode from rINST 1188 GOTO_OPCODE(ip) @ jump to next instruction 1189#endif 1190 1191 1192/* ------------------------------ */ 1193 .balign 64 1194.L_OP_GOTO_32: /* 0x2a */ 1195/* File: armv5te/OP_GOTO_32.S */ 1196 /* 1197 * Unconditional branch, 32-bit offset. 1198 * 1199 * The branch distance is a signed code-unit offset, which we need to 1200 * double to get a byte offset. 
1201 * 1202 * Unlike most opcodes, this one is allowed to branch to itself, so 1203 * our "backward branch" test must be "<=0" instead of "<0". The ORRS 1204 * instruction doesn't affect the V flag, so we need to clear it 1205 * explicitly. 1206 */ 1207 /* goto/32 +AAAAAAAA */ 1208 FETCH(r0, 1) @ r0<- aaaa (lo) 1209 FETCH(r1, 2) @ r1<- AAAA (hi) 1210 cmp ip, ip @ (clear V flag during stall) 1211 orrs r0, r0, r1, lsl #16 @ r0<- AAAAaaaa, check sign 1212 mov r9, r0, asl #1 @ r9<- byte offset 1213 ble common_backwardBranch @ backward branch, do periodic checks 1214#if defined(WITH_JIT) 1215 GET_JIT_PROF_TABLE(r0) 1216 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1217 cmp r0,#0 1218 bne common_updateProfile 1219 GET_INST_OPCODE(ip) @ extract opcode from rINST 1220 GOTO_OPCODE(ip) @ jump to next instruction 1221#else 1222 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1223 GET_INST_OPCODE(ip) @ extract opcode from rINST 1224 GOTO_OPCODE(ip) @ jump to next instruction 1225#endif 1226 1227/* ------------------------------ */ 1228 .balign 64 1229.L_OP_PACKED_SWITCH: /* 0x2b */ 1230/* File: armv5te/OP_PACKED_SWITCH.S */ 1231 /* 1232 * Handle a packed-switch or sparse-switch instruction. In both cases 1233 * we decode it and hand it off to a helper function. 1234 * 1235 * We don't really expect backward branches in a switch statement, but 1236 * they're perfectly legal, so we check for them here. 
1237 * 1238 * for: packed-switch, sparse-switch 1239 */ 1240 /* op vAA, +BBBB */ 1241 FETCH(r0, 1) @ r0<- bbbb (lo) 1242 FETCH(r1, 2) @ r1<- BBBB (hi) 1243 mov r3, rINST, lsr #8 @ r3<- AA 1244 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 1245 GET_VREG(r1, r3) @ r1<- vAA 1246 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2 1247 bl dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset 1248 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign 1249 bmi common_backwardBranch @ backward branch, do periodic checks 1250 beq common_backwardBranch @ (want to use BLE but V is unknown) 1251#if defined(WITH_JIT) 1252 GET_JIT_PROF_TABLE(r0) 1253 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1254 cmp r0,#0 1255 bne common_updateProfile 1256 GET_INST_OPCODE(ip) @ extract opcode from rINST 1257 GOTO_OPCODE(ip) @ jump to next instruction 1258#else 1259 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1260 GET_INST_OPCODE(ip) @ extract opcode from rINST 1261 GOTO_OPCODE(ip) @ jump to next instruction 1262#endif 1263 1264 1265/* ------------------------------ */ 1266 .balign 64 1267.L_OP_SPARSE_SWITCH: /* 0x2c */ 1268/* File: armv5te/OP_SPARSE_SWITCH.S */ 1269/* File: armv5te/OP_PACKED_SWITCH.S */ 1270 /* 1271 * Handle a packed-switch or sparse-switch instruction. In both cases 1272 * we decode it and hand it off to a helper function. 1273 * 1274 * We don't really expect backward branches in a switch statement, but 1275 * they're perfectly legal, so we check for them here. 
1276 * 1277 * for: packed-switch, sparse-switch 1278 */ 1279 /* op vAA, +BBBB */ 1280 FETCH(r0, 1) @ r0<- bbbb (lo) 1281 FETCH(r1, 2) @ r1<- BBBB (hi) 1282 mov r3, rINST, lsr #8 @ r3<- AA 1283 orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb 1284 GET_VREG(r1, r3) @ r1<- vAA 1285 add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2 1286 bl dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset 1287 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign 1288 bmi common_backwardBranch @ backward branch, do periodic checks 1289 beq common_backwardBranch @ (want to use BLE but V is unknown) 1290#if defined(WITH_JIT) 1291 GET_JIT_PROF_TABLE(r0) 1292 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1293 cmp r0,#0 1294 bne common_updateProfile 1295 GET_INST_OPCODE(ip) @ extract opcode from rINST 1296 GOTO_OPCODE(ip) @ jump to next instruction 1297#else 1298 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1299 GET_INST_OPCODE(ip) @ extract opcode from rINST 1300 GOTO_OPCODE(ip) @ jump to next instruction 1301#endif 1302 1303 1304 1305/* ------------------------------ */ 1306 .balign 64 1307.L_OP_CMPL_FLOAT: /* 0x2d */ 1308/* File: vfp/OP_CMPL_FLOAT.S */ 1309 /* 1310 * Compare two floating-point values. Puts 0, 1, or -1 into the 1311 * destination register based on the results of the comparison. 
1312 * 1313 * int compare(x, y) { 1314 * if (x == y) { 1315 * return 0; 1316 * } else if (x > y) { 1317 * return 1; 1318 * } else if (x < y) { 1319 * return -1; 1320 * } else { 1321 * return -1; 1322 * } 1323 * } 1324 */ 1325 /* op vAA, vBB, vCC */ 1326 FETCH(r0, 1) @ r0<- CCBB 1327 and r2, r0, #255 @ r2<- BB 1328 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1329 flds s0, [r2] @ s0<- vBB 1330 mov r3, r0, lsr #8 @ r3<- CC 1331 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1332 flds s1, [r3] @ s1<- vCC 1333 mov r9, rINST, lsr #8 @ r9<- AA 1334 fcmpes s0, s1 @ compare (vBB, vCC) 1335 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1336 mvn r0, #0 @ r0<- -1 (default) 1337 GET_INST_OPCODE(ip) @ extract opcode from rINST 1338 fmstat @ export status flags 1339 movgt r0, #1 @ (greater than) r1<- 1 1340 moveq r0, #0 @ (equal) r1<- 0 1341 bl .LOP_CMPL_FLOAT_finish @ argh 1342 1343 1344/* ------------------------------ */ 1345 .balign 64 1346.L_OP_CMPG_FLOAT: /* 0x2e */ 1347/* File: vfp/OP_CMPG_FLOAT.S */ 1348 /* 1349 * Compare two floating-point values. Puts 0, 1, or -1 into the 1350 * destination register based on the results of the comparison. 
1351 * 1352 * int compare(x, y) { 1353 * if (x == y) { 1354 * return 0; 1355 * } else if (x < y) { 1356 * return -1; 1357 * } else if (x > y) { 1358 * return 1; 1359 * } else { 1360 * return 1; 1361 * } 1362 * } 1363 */ 1364 /* op vAA, vBB, vCC */ 1365 FETCH(r0, 1) @ r0<- CCBB 1366 and r2, r0, #255 @ r2<- BB 1367 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1368 flds s0, [r2] @ s0<- vBB 1369 mov r3, r0, lsr #8 @ r3<- CC 1370 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1371 flds s1, [r3] @ s1<- vCC 1372 mov r9, rINST, lsr #8 @ r9<- AA 1373 fcmpes s0, s1 @ compare (vBB, vCC) 1374 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1375 mov r0, #1 @ r0<- 1 (default) 1376 GET_INST_OPCODE(ip) @ extract opcode from rINST 1377 fmstat @ export status flags 1378 mvnmi r0, #0 @ (less than) r1<- -1 1379 moveq r0, #0 @ (equal) r1<- 0 1380 bl .LOP_CMPG_FLOAT_finish @ argh 1381 1382 1383/* ------------------------------ */ 1384 .balign 64 1385.L_OP_CMPL_DOUBLE: /* 0x2f */ 1386/* File: vfp/OP_CMPL_DOUBLE.S */ 1387 /* 1388 * Compare two floating-point values. Puts 0, 1, or -1 into the 1389 * destination register based on the results of the comparison. 
1390 * 1391 * int compare(x, y) { 1392 * if (x == y) { 1393 * return 0; 1394 * } else if (x > y) { 1395 * return 1; 1396 * } else if (x < y) { 1397 * return -1; 1398 * } else { 1399 * return -1; 1400 * } 1401 * } 1402 */ 1403 /* op vAA, vBB, vCC */ 1404 FETCH(r0, 1) @ r0<- CCBB 1405 and r2, r0, #255 @ r2<- BB 1406 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1407 fldd d0, [r2] @ d0<- vBB 1408 mov r3, r0, lsr #8 @ r3<- CC 1409 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1410 fldd d1, [r3] @ d1<- vCC 1411 mov r9, rINST, lsr #8 @ r9<- AA 1412 fcmped d0, d1 @ compare (vBB, vCC) 1413 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1414 mvn r0, #0 @ r0<- -1 (default) 1415 GET_INST_OPCODE(ip) @ extract opcode from rINST 1416 fmstat @ export status flags 1417 movgt r0, #1 @ (greater than) r1<- 1 1418 moveq r0, #0 @ (equal) r1<- 0 1419 bl .LOP_CMPL_DOUBLE_finish @ argh 1420 1421 1422/* ------------------------------ */ 1423 .balign 64 1424.L_OP_CMPG_DOUBLE: /* 0x30 */ 1425/* File: vfp/OP_CMPG_DOUBLE.S */ 1426 /* 1427 * Compare two floating-point values. Puts 0, 1, or -1 into the 1428 * destination register based on the results of the comparison. 
1429 * 1430 * int compare(x, y) { 1431 * if (x == y) { 1432 * return 0; 1433 * } else if (x < y) { 1434 * return -1; 1435 * } else if (x > y) { 1436 * return 1; 1437 * } else { 1438 * return 1; 1439 * } 1440 * } 1441 */ 1442 /* op vAA, vBB, vCC */ 1443 FETCH(r0, 1) @ r0<- CCBB 1444 and r2, r0, #255 @ r2<- BB 1445 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 1446 fldd d0, [r2] @ d0<- vBB 1447 mov r3, r0, lsr #8 @ r3<- CC 1448 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 1449 fldd d1, [r3] @ d1<- vCC 1450 mov r9, rINST, lsr #8 @ r9<- AA 1451 fcmped d0, d1 @ compare (vBB, vCC) 1452 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 1453 mov r0, #1 @ r0<- 1 (default) 1454 GET_INST_OPCODE(ip) @ extract opcode from rINST 1455 fmstat @ export status flags 1456 mvnmi r0, #0 @ (less than) r1<- -1 1457 moveq r0, #0 @ (equal) r1<- 0 1458 bl .LOP_CMPG_DOUBLE_finish @ argh 1459 1460 1461/* ------------------------------ */ 1462 .balign 64 1463.L_OP_CMP_LONG: /* 0x31 */ 1464/* File: armv5te/OP_CMP_LONG.S */ 1465 /* 1466 * Compare two 64-bit values. Puts 0, 1, or -1 into the destination 1467 * register based on the results of the comparison. 1468 * 1469 * We load the full values with LDM, but in practice many values could 1470 * be resolved by only looking at the high word. This could be made 1471 * faster or slower by splitting the LDM into a pair of LDRs. 1472 * 1473 * If we just wanted to set condition flags, we could do this: 1474 * subs ip, r0, r2 1475 * sbcs ip, r1, r3 1476 * subeqs ip, r0, r2 1477 * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific 1478 * integer value, which we can do with 2 conditional mov/mvn instructions 1479 * (set 1, set -1; if they're equal we already have 0 in ip), giving 1480 * us a constant 5-cycle path plus a branch at the end to the 1481 * instruction epilogue code. The multi-compare approach below needs 1482 * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch 1483 * in the worst case (the 64-bit values are equal). 
1484 */ 1485 /* cmp-long vAA, vBB, vCC */ 1486 FETCH(r0, 1) @ r0<- CCBB 1487 mov r9, rINST, lsr #8 @ r9<- AA 1488 and r2, r0, #255 @ r2<- BB 1489 mov r3, r0, lsr #8 @ r3<- CC 1490 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 1491 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 1492 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 1493 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 1494 cmp r1, r3 @ compare (vBB+1, vCC+1) 1495 blt .LOP_CMP_LONG_less @ signed compare on high part 1496 bgt .LOP_CMP_LONG_greater 1497 subs r1, r0, r2 @ r1<- r0 - r2 1498 bhi .LOP_CMP_LONG_greater @ unsigned compare on low part 1499 bne .LOP_CMP_LONG_less 1500 b .LOP_CMP_LONG_finish @ equal; r1 already holds 0 1501 1502/* ------------------------------ */ 1503 .balign 64 1504.L_OP_IF_EQ: /* 0x32 */ 1505/* File: armv5te/OP_IF_EQ.S */ 1506/* File: armv5te/bincmp.S */ 1507 /* 1508 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1509 * fragment that specifies the *reverse* comparison to perform, e.g. 1510 * for "if-le" you would use "gt". 
1511 * 1512 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1513 */ 1514 /* if-cmp vA, vB, +CCCC */ 1515 mov r0, rINST, lsr #8 @ r0<- A+ 1516 mov r1, rINST, lsr #12 @ r1<- B 1517 and r0, r0, #15 1518 GET_VREG(r3, r1) @ r3<- vB 1519 GET_VREG(r2, r0) @ r2<- vA 1520 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1521 cmp r2, r3 @ compare (vA, vB) 1522 bne 1f @ branch to 1 if comparison failed 1523 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1524 movs r9, r9, asl #1 @ convert to bytes, check sign 1525 bmi common_backwardBranch @ yes, do periodic checks 15261: 1527#if defined(WITH_JIT) 1528 GET_JIT_PROF_TABLE(r0) 1529 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1530 b common_testUpdateProfile 1531#else 1532 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1533 GET_INST_OPCODE(ip) @ extract opcode from rINST 1534 GOTO_OPCODE(ip) @ jump to next instruction 1535#endif 1536 1537 1538 1539/* ------------------------------ */ 1540 .balign 64 1541.L_OP_IF_NE: /* 0x33 */ 1542/* File: armv5te/OP_IF_NE.S */ 1543/* File: armv5te/bincmp.S */ 1544 /* 1545 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1546 * fragment that specifies the *reverse* comparison to perform, e.g. 1547 * for "if-le" you would use "gt". 
1548 * 1549 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1550 */ 1551 /* if-cmp vA, vB, +CCCC */ 1552 mov r0, rINST, lsr #8 @ r0<- A+ 1553 mov r1, rINST, lsr #12 @ r1<- B 1554 and r0, r0, #15 1555 GET_VREG(r3, r1) @ r3<- vB 1556 GET_VREG(r2, r0) @ r2<- vA 1557 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1558 cmp r2, r3 @ compare (vA, vB) 1559 beq 1f @ branch to 1 if comparison failed 1560 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1561 movs r9, r9, asl #1 @ convert to bytes, check sign 1562 bmi common_backwardBranch @ yes, do periodic checks 15631: 1564#if defined(WITH_JIT) 1565 GET_JIT_PROF_TABLE(r0) 1566 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1567 b common_testUpdateProfile 1568#else 1569 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1570 GET_INST_OPCODE(ip) @ extract opcode from rINST 1571 GOTO_OPCODE(ip) @ jump to next instruction 1572#endif 1573 1574 1575 1576/* ------------------------------ */ 1577 .balign 64 1578.L_OP_IF_LT: /* 0x34 */ 1579/* File: armv5te/OP_IF_LT.S */ 1580/* File: armv5te/bincmp.S */ 1581 /* 1582 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1583 * fragment that specifies the *reverse* comparison to perform, e.g. 1584 * for "if-le" you would use "gt". 
1585 * 1586 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1587 */ 1588 /* if-cmp vA, vB, +CCCC */ 1589 mov r0, rINST, lsr #8 @ r0<- A+ 1590 mov r1, rINST, lsr #12 @ r1<- B 1591 and r0, r0, #15 1592 GET_VREG(r3, r1) @ r3<- vB 1593 GET_VREG(r2, r0) @ r2<- vA 1594 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1595 cmp r2, r3 @ compare (vA, vB) 1596 bge 1f @ branch to 1 if comparison failed 1597 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1598 movs r9, r9, asl #1 @ convert to bytes, check sign 1599 bmi common_backwardBranch @ yes, do periodic checks 16001: 1601#if defined(WITH_JIT) 1602 GET_JIT_PROF_TABLE(r0) 1603 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1604 b common_testUpdateProfile 1605#else 1606 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1607 GET_INST_OPCODE(ip) @ extract opcode from rINST 1608 GOTO_OPCODE(ip) @ jump to next instruction 1609#endif 1610 1611 1612 1613/* ------------------------------ */ 1614 .balign 64 1615.L_OP_IF_GE: /* 0x35 */ 1616/* File: armv5te/OP_IF_GE.S */ 1617/* File: armv5te/bincmp.S */ 1618 /* 1619 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1620 * fragment that specifies the *reverse* comparison to perform, e.g. 1621 * for "if-le" you would use "gt". 
1622 * 1623 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1624 */ 1625 /* if-cmp vA, vB, +CCCC */ 1626 mov r0, rINST, lsr #8 @ r0<- A+ 1627 mov r1, rINST, lsr #12 @ r1<- B 1628 and r0, r0, #15 1629 GET_VREG(r3, r1) @ r3<- vB 1630 GET_VREG(r2, r0) @ r2<- vA 1631 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1632 cmp r2, r3 @ compare (vA, vB) 1633 blt 1f @ branch to 1 if comparison failed 1634 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1635 movs r9, r9, asl #1 @ convert to bytes, check sign 1636 bmi common_backwardBranch @ yes, do periodic checks 16371: 1638#if defined(WITH_JIT) 1639 GET_JIT_PROF_TABLE(r0) 1640 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1641 b common_testUpdateProfile 1642#else 1643 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1644 GET_INST_OPCODE(ip) @ extract opcode from rINST 1645 GOTO_OPCODE(ip) @ jump to next instruction 1646#endif 1647 1648 1649 1650/* ------------------------------ */ 1651 .balign 64 1652.L_OP_IF_GT: /* 0x36 */ 1653/* File: armv5te/OP_IF_GT.S */ 1654/* File: armv5te/bincmp.S */ 1655 /* 1656 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1657 * fragment that specifies the *reverse* comparison to perform, e.g. 1658 * for "if-le" you would use "gt". 
1659 * 1660 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1661 */ 1662 /* if-cmp vA, vB, +CCCC */ 1663 mov r0, rINST, lsr #8 @ r0<- A+ 1664 mov r1, rINST, lsr #12 @ r1<- B 1665 and r0, r0, #15 1666 GET_VREG(r3, r1) @ r3<- vB 1667 GET_VREG(r2, r0) @ r2<- vA 1668 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1669 cmp r2, r3 @ compare (vA, vB) 1670 ble 1f @ branch to 1 if comparison failed 1671 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1672 movs r9, r9, asl #1 @ convert to bytes, check sign 1673 bmi common_backwardBranch @ yes, do periodic checks 16741: 1675#if defined(WITH_JIT) 1676 GET_JIT_PROF_TABLE(r0) 1677 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1678 b common_testUpdateProfile 1679#else 1680 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1681 GET_INST_OPCODE(ip) @ extract opcode from rINST 1682 GOTO_OPCODE(ip) @ jump to next instruction 1683#endif 1684 1685 1686 1687/* ------------------------------ */ 1688 .balign 64 1689.L_OP_IF_LE: /* 0x37 */ 1690/* File: armv5te/OP_IF_LE.S */ 1691/* File: armv5te/bincmp.S */ 1692 /* 1693 * Generic two-operand compare-and-branch operation. Provide a "revcmp" 1694 * fragment that specifies the *reverse* comparison to perform, e.g. 1695 * for "if-le" you would use "gt". 
1696 * 1697 * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le 1698 */ 1699 /* if-cmp vA, vB, +CCCC */ 1700 mov r0, rINST, lsr #8 @ r0<- A+ 1701 mov r1, rINST, lsr #12 @ r1<- B 1702 and r0, r0, #15 1703 GET_VREG(r3, r1) @ r3<- vB 1704 GET_VREG(r2, r0) @ r2<- vA 1705 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1706 cmp r2, r3 @ compare (vA, vB) 1707 bgt 1f @ branch to 1 if comparison failed 1708 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1709 movs r9, r9, asl #1 @ convert to bytes, check sign 1710 bmi common_backwardBranch @ yes, do periodic checks 17111: 1712#if defined(WITH_JIT) 1713 GET_JIT_PROF_TABLE(r0) 1714 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1715 b common_testUpdateProfile 1716#else 1717 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1718 GET_INST_OPCODE(ip) @ extract opcode from rINST 1719 GOTO_OPCODE(ip) @ jump to next instruction 1720#endif 1721 1722 1723 1724/* ------------------------------ */ 1725 .balign 64 1726.L_OP_IF_EQZ: /* 0x38 */ 1727/* File: armv5te/OP_IF_EQZ.S */ 1728/* File: armv5te/zcmp.S */ 1729 /* 1730 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1731 * fragment that specifies the *reverse* comparison to perform, e.g. 1732 * for "if-le" you would use "gt". 
1733 * 1734 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1735 */ 1736 /* if-cmp vAA, +BBBB */ 1737 mov r0, rINST, lsr #8 @ r0<- AA 1738 GET_VREG(r2, r0) @ r2<- vAA 1739 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1740 cmp r2, #0 @ compare (vA, 0) 1741 bne 1f @ branch to 1 if comparison failed 1742 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1743 movs r9, r9, asl #1 @ convert to bytes, check sign 1744 bmi common_backwardBranch @ backward branch, do periodic checks 17451: 1746#if defined(WITH_JIT) 1747 GET_JIT_PROF_TABLE(r0) 1748 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1749 cmp r0,#0 1750 bne common_updateProfile 1751 GET_INST_OPCODE(ip) @ extract opcode from rINST 1752 GOTO_OPCODE(ip) @ jump to next instruction 1753#else 1754 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1755 GET_INST_OPCODE(ip) @ extract opcode from rINST 1756 GOTO_OPCODE(ip) @ jump to next instruction 1757#endif 1758 1759 1760 1761/* ------------------------------ */ 1762 .balign 64 1763.L_OP_IF_NEZ: /* 0x39 */ 1764/* File: armv5te/OP_IF_NEZ.S */ 1765/* File: armv5te/zcmp.S */ 1766 /* 1767 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1768 * fragment that specifies the *reverse* comparison to perform, e.g. 1769 * for "if-le" you would use "gt". 
1770 * 1771 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1772 */ 1773 /* if-cmp vAA, +BBBB */ 1774 mov r0, rINST, lsr #8 @ r0<- AA 1775 GET_VREG(r2, r0) @ r2<- vAA 1776 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1777 cmp r2, #0 @ compare (vA, 0) 1778 beq 1f @ branch to 1 if comparison failed 1779 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1780 movs r9, r9, asl #1 @ convert to bytes, check sign 1781 bmi common_backwardBranch @ backward branch, do periodic checks 17821: 1783#if defined(WITH_JIT) 1784 GET_JIT_PROF_TABLE(r0) 1785 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1786 cmp r0,#0 1787 bne common_updateProfile 1788 GET_INST_OPCODE(ip) @ extract opcode from rINST 1789 GOTO_OPCODE(ip) @ jump to next instruction 1790#else 1791 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1792 GET_INST_OPCODE(ip) @ extract opcode from rINST 1793 GOTO_OPCODE(ip) @ jump to next instruction 1794#endif 1795 1796 1797 1798/* ------------------------------ */ 1799 .balign 64 1800.L_OP_IF_LTZ: /* 0x3a */ 1801/* File: armv5te/OP_IF_LTZ.S */ 1802/* File: armv5te/zcmp.S */ 1803 /* 1804 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1805 * fragment that specifies the *reverse* comparison to perform, e.g. 1806 * for "if-le" you would use "gt". 
1807 * 1808 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1809 */ 1810 /* if-cmp vAA, +BBBB */ 1811 mov r0, rINST, lsr #8 @ r0<- AA 1812 GET_VREG(r2, r0) @ r2<- vAA 1813 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1814 cmp r2, #0 @ compare (vA, 0) 1815 bge 1f @ branch to 1 if comparison failed 1816 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1817 movs r9, r9, asl #1 @ convert to bytes, check sign 1818 bmi common_backwardBranch @ backward branch, do periodic checks 18191: 1820#if defined(WITH_JIT) 1821 GET_JIT_PROF_TABLE(r0) 1822 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1823 cmp r0,#0 1824 bne common_updateProfile 1825 GET_INST_OPCODE(ip) @ extract opcode from rINST 1826 GOTO_OPCODE(ip) @ jump to next instruction 1827#else 1828 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1829 GET_INST_OPCODE(ip) @ extract opcode from rINST 1830 GOTO_OPCODE(ip) @ jump to next instruction 1831#endif 1832 1833 1834 1835/* ------------------------------ */ 1836 .balign 64 1837.L_OP_IF_GEZ: /* 0x3b */ 1838/* File: armv5te/OP_IF_GEZ.S */ 1839/* File: armv5te/zcmp.S */ 1840 /* 1841 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1842 * fragment that specifies the *reverse* comparison to perform, e.g. 1843 * for "if-le" you would use "gt". 
 *
 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
 */
    /* if-cmp vAA, +BBBB */
    /* (this instance: revcmp is "lt", i.e. the if-gez handler) */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:                                      @ not-taken or forward-taken path
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_GTZ: /* 0x3c */
/* File: armv5te/OP_IF_GTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:                                      @ not-taken or forward-taken path
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_IF_LEZ: /* 0x3d */
/* File: armv5te/OP_IF_LEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bgt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:                                      @ not-taken or forward-taken path
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_3E: /* 0x3e */
/* File: armv5te/OP_UNUSED_3E.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode slot: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_3F: /* 0x3f */
/* File: armv5te/OP_UNUSED_3F.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode slot: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_40: /* 0x40 */
/* File: armv5te/OP_UNUSED_40.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode slot: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_41: /* 0x41 */
/* File: armv5te/OP_UNUSED_41.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode slot: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_42: /* 0x42 */
/* File: armv5te/OP_UNUSED_42.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode slot: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_43: /* 0x43 */
/* File: armv5te/OP_UNUSED_43.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode slot: abort


/* ------------------------------ */
    .balign 64
.L_OP_AGET: /* 0x44 */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width (4-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: armv5te/OP_AGET_WIDE.S */
    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width (8-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .LOP_AGET_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_AGET_OBJECT: /* 0x46 */
/* File: armv5te/OP_AGET_OBJECT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width (4-byte refs)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BOOLEAN: /* 0x47 */
/* File: armv5te/OP_AGET_BOOLEAN.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrb    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (zero-extended)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BYTE: /* 0x48 */
/* File: armv5te/OP_AGET_BYTE.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (sign-extended)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_CHAR: /* 0x49 */
/* File: armv5te/OP_AGET_CHAR.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrh    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (zero-extended)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_SHORT: /* 0x4a */
/* File: armv5te/OP_AGET_SHORT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (sign-extended)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT: /* 0x4b */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width (4-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_APUT_WIDE: /* 0x4c */
/* File: armv5te/OP_APUT_WIDE.S */
    /*
     * Array put, 64 bits.  vBB[vCC] <- vAA.
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
     */
    /* aput-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width (8-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    bcc     .LOP_APUT_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_APUT_OBJECT: /* 0x4d */
/* File: armv5te/OP_APUT_OBJECT.S */
    /*
     * Store an object into an array.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r1, r2)                    @ r1<- vBB (array object)
    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
    cmp     r1, #0                      @ null array object?
    GET_VREG(r9, r9)                    @ r9<- vAA (object to store)
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r1, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r10, r1, r0, lsl #2         @ r10<- arrayObj + index*width (4-byte refs)
    cmp     r0, r3                      @ compare unsigned index, length
    bcc     .LOP_APUT_OBJECT_finish     @ we're okay, continue on
    b       common_errArrayIndex        @ index >= length, bail


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BOOLEAN: /* 0x4e */
/* File: armv5te/OP_APUT_BOOLEAN.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BYTE: /* 0x4f */
/* File: armv5te/OP_APUT_BYTE.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_CHAR: /* 0x50 */
/* File: armv5te/OP_APUT_CHAR.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_SHORT: /* 0x51 */
/* File: armv5te/OP_APUT_SHORT.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2-byte elems)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET: /* 0x52 */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: armv5te/OP_IGET_WIDE.S */
    /*
     * Wide (64-bit) instance field get.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_WIDE_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: armv5te/OP_IGET_OBJECT.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_OBJECT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: armv5te/OP_IGET_BOOLEAN.S */
@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_BOOLEAN_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BYTE: /* 0x56 */
/* File: armv5te/OP_IGET_BYTE.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_BYTE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_CHAR: /* 0x57 */
/* File: armv5te/OP_IGET_CHAR.S */
@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_CHAR_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_SHORT: /* 0x58 */
/* File: armv5te/OP_IGET_SHORT.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_SHORT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT: /* 0x59 */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
2641 * 2642 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2643 */ 2644 /* op vA, vB, field@CCCC */ 2645 mov r0, rINST, lsr #12 @ r0<- B 2646 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2647 FETCH(r1, 1) @ r1<- field ref CCCC 2648 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2649 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2650 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2651 cmp r0, #0 @ is resolved entry null? 2652 bne .LOP_IPUT_finish @ no, already resolved 26538: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2654 EXPORT_PC() @ resolve() could throw 2655 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2656 bl dvmResolveInstField @ r0<- resolved InstField ptr 2657 cmp r0, #0 @ success? 2658 bne .LOP_IPUT_finish @ yes, finish up 2659 b common_exceptionThrown 2660 2661/* ------------------------------ */ 2662 .balign 64 2663.L_OP_IPUT_WIDE: /* 0x5a */ 2664/* File: armv5te/OP_IPUT_WIDE.S */ 2665 /* iput-wide vA, vB, field@CCCC */ 2666 mov r0, rINST, lsr #12 @ r0<- B 2667 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2668 FETCH(r1, 1) @ r1<- field ref CCCC 2669 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields 2670 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2671 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2672 cmp r0, #0 @ is resolved entry null? 2673 bne .LOP_IPUT_WIDE_finish @ no, already resolved 26748: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2675 EXPORT_PC() @ resolve() could throw 2676 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2677 bl dvmResolveInstField @ r0<- resolved InstField ptr 2678 cmp r0, #0 @ success? 2679 bne .LOP_IPUT_WIDE_finish @ yes, finish up 2680 b common_exceptionThrown 2681 2682/* ------------------------------ */ 2683 .balign 64 2684.L_OP_IPUT_OBJECT: /* 0x5b */ 2685/* File: armv5te/OP_IPUT_OBJECT.S */ 2686/* File: armv5te/OP_IPUT.S */ 2687 /* 2688 * General 32-bit instance field put. 
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_OBJECT_finish     @ yes, finish up
    b       common_exceptionThrown

/*
 * NOTE(review): this file is generated by gen-mterp.py from the per-opcode
 * templates named in the "File:" markers below; fix bugs in the templates,
 * not in this generated output.
 */

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BOOLEAN: /* 0x5c */
/* File: armv5te/OP_IPUT_BOOLEAN.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BOOLEAN_finish    @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BYTE: /* 0x5d */
/* File: armv5te/OP_IPUT_BYTE.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BYTE_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_CHAR: /* 0x5e */
/* File: armv5te/OP_IPUT_CHAR.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_CHAR_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_SHORT: /* 0x5f */
/* File: armv5te/OP_IPUT_SHORT.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_SHORT_finish      @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET: /* 0x60 */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_resolve           @ yes, do resolve
.LOP_SGET_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_WIDE: /* 0x61 */
/* File: armv5te/OP_SGET_WIDE.S */
    /*
     * 64-bit SGET handler.
     */
    /* sget-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_WIDE_resolve      @ yes, do resolve
.LOP_SGET_WIDE_finish:
    mov     r1, rINST, lsr #8           @ r1<- AA
    ldrd    r2, [r0, #offStaticField_value] @ r2/r3<- field value (aligned)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    stmia   r1, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_OBJECT: /* 0x62 */
/* File: armv5te/OP_SGET_OBJECT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_OBJECT_resolve    @ yes, do resolve
.LOP_SGET_OBJECT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_BOOLEAN: /* 0x63 */
/* File: armv5te/OP_SGET_BOOLEAN.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BOOLEAN_resolve   @ yes, do resolve
.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_BYTE: /* 0x64 */
/* File: armv5te/OP_SGET_BYTE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BYTE_resolve      @ yes, do resolve
.LOP_SGET_BYTE_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_CHAR: /* 0x65 */
/* File: armv5te/OP_SGET_CHAR.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_CHAR_resolve      @ yes, do resolve
.LOP_SGET_CHAR_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_SHORT: /* 0x66 */
/* File: armv5te/OP_SGET_SHORT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_SHORT_resolve     @ yes, do resolve
.LOP_SGET_SHORT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT: /* 0x67 */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_resolve           @ yes, do resolve
.LOP_SPUT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_WIDE: /* 0x68 */
/* File: armv5te/OP_SPUT_WIDE.S */
    /*
     * 64-bit SPUT handler.
     */
    /* sput-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_WIDE_resolve      @ yes, do resolve
.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strd    r2, [r0, #offStaticField_value] @ field<- vAA/vAA+1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_OBJECT: /* 0x69 */
/* File: armv5te/OP_SPUT_OBJECT.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_OBJECT_resolve    @ yes, do resolve
.LOP_SPUT_OBJECT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BOOLEAN: /* 0x6a */
/* File: armv5te/OP_SPUT_BOOLEAN.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BOOLEAN_resolve   @ yes, do resolve
.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BYTE: /* 0x6b */
/* File: armv5te/OP_SPUT_BYTE.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BYTE_resolve      @ yes, do resolve
.LOP_SPUT_BYTE_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_CHAR: /* 0x6c */
/* File: armv5te/OP_SPUT_CHAR.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_CHAR_resolve      @ yes, do resolve
.LOP_SPUT_CHAR_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_SHORT: /* 0x6d */
/* File: armv5te/OP_SPUT_SHORT.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_SHORT_resolve     @ yes, do resolve
.LOP_SPUT_SHORT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL: /* 0x6e */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_continue @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_continue @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER: /* 0x6f */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_continue  @ resolved, continue on
    b       .LOP_INVOKE_SUPER_resolve   @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT: /* 0x70 */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoid loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_resolve  @ not resolved, do it now
.LOP_INVOKE_DIRECT_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodNoRange  @ no, continue on
    b       common_errNullObject        @ yes, throw exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC: /* 0x71 */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodNoRange  @ yes, continue on
0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodNoRange  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE: /* 0x72 */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!0)
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodNoRange  @ jump to common handler


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_73: /* 0x73 */
/* File: armv5te/OP_UNUSED_73.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on
    b       .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoid loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now
.LOP_INVOKE_DIRECT_RANGE_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodRange    @ no, continue on
    b       common_errNullObject        @ yes, throw exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodRange    @ yes, continue on
0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodRange    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!1)
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodRange    @ jump to common handler


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_79: /* 0x79 */
/* File: armv5te/OP_UNUSED_79.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_7A: /* 0x7a */
/* File: armv5te/OP_UNUSED_7A.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_NEG_INT: /* 0x7b */
/* File: armv5te/OP_NEG_INT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsb     r0, r0, #0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NOT_INT: /* 0x7c */
/* File: armv5te/OP_NOT_INT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_LONG: /* 0x7d */
/* File: armv5te/OP_NEG_LONG.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsbs    r0, r0, #0                  @ optional op; may set condition codes
    rsc     r1, r1, #0                  @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NOT_LONG: /* 0x7e */
/* File: armv5te/OP_NOT_LONG.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ optional op; may set condition codes
    mvn     r1, r1                      @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_FLOAT: /* 0x7f */
/* File: armv5te/OP_NEG_FLOAT.S */
/* File: armv5te/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB
    and     r9, r9, #15
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    add     r0, r0, #0x80000000         @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_DOUBLE: /* 0x80 */
/* File: armv5te/OP_NEG_DOUBLE.S */
/* File: armv5te/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    add     r1, r1, #0x80000000         @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_LONG: /* 0x81 */
/* File: armv5te/OP_INT_TO_LONG.S */
/* File: armv5te/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r0, asr #31             @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_FLOAT: /* 0x82 */
/* File: vfp/OP_INT_TO_FLOAT.S */
/* File: vfp/funop.S */
    /*
     * Generic 32-bit unary floating-point operation.  Provide an "instr"
     * line that specifies an instruction that performs "s1 = op s0".
     *
     * for: int-to-float, float-to-int
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fsitos  s1, s0                      @ s1<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s1, [r9]                    @ vA<- s1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_DOUBLE: /* 0x83 */
/* File: vfp/OP_INT_TO_DOUBLE.S */
/* File: vfp/funopWider.S */
    /*
     * Generic 32bit-to-64bit floating point unary operation.  Provide an
     * "instr" line that specifies an instruction that performs "d0 = op s0".
     *
     * For: int-to-double, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fsitod  d0, s0                      @ d0<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fstd    d0, [r9]                    @ vA<- d0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_LONG_TO_INT: /* 0x84 */
/* File: armv5te/OP_LONG_TO_INT.S */
/* we ignore the high word, making this equivalent to a 32-bit reg move */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next
instruction 3774 3775 3776 3777/* ------------------------------ */ 3778 .balign 64 3779.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3780/* File: armv5te/OP_LONG_TO_FLOAT.S */ 3781/* File: armv5te/unopNarrower.S */ 3782 /* 3783 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3784 * that specifies an instruction that performs "result = op r0/r1", where 3785 * "result" is a 32-bit quantity in r0. 3786 * 3787 * For: long-to-float, double-to-int, double-to-float 3788 * 3789 * (This would work for long-to-int, but that instruction is actually 3790 * an exact match for OP_MOVE.) 3791 */ 3792 /* unop vA, vB */ 3793 mov r3, rINST, lsr #12 @ r3<- B 3794 mov r9, rINST, lsr #8 @ r9<- A+ 3795 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3796 and r9, r9, #15 3797 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3798 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3799 @ optional op; may set condition codes 3800 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3801 GET_INST_OPCODE(ip) @ extract opcode from rINST 3802 SET_VREG(r0, r9) @ vA<- r0 3803 GOTO_OPCODE(ip) @ jump to next instruction 3804 /* 10-11 instructions */ 3805 3806 3807/* ------------------------------ */ 3808 .balign 64 3809.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3810/* File: armv5te/OP_LONG_TO_DOUBLE.S */ 3811/* File: armv5te/unopWide.S */ 3812 /* 3813 * Generic 64-bit unary operation. Provide an "instr" line that 3814 * specifies an instruction that performs "result = op r0/r1". 3815 * This could be an ARM instruction or a function call. 
3816 * 3817 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3818 */ 3819 /* unop vA, vB */ 3820 mov r9, rINST, lsr #8 @ r9<- A+ 3821 mov r3, rINST, lsr #12 @ r3<- B 3822 and r9, r9, #15 3823 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3824 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3825 ldmia r3, {r0-r1} @ r0/r1<- vAA 3826 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3827 @ optional op; may set condition codes 3828 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3829 GET_INST_OPCODE(ip) @ extract opcode from rINST 3830 stmia r9, {r0-r1} @ vAA<- r0/r1 3831 GOTO_OPCODE(ip) @ jump to next instruction 3832 /* 12-13 instructions */ 3833 3834 3835 3836/* ------------------------------ */ 3837 .balign 64 3838.L_OP_FLOAT_TO_INT: /* 0x87 */ 3839/* File: vfp/OP_FLOAT_TO_INT.S */ 3840/* File: vfp/funop.S */ 3841 /* 3842 * Generic 32-bit unary floating-point operation. Provide an "instr" 3843 * line that specifies an instruction that performs "s1 = op s0". 3844 * 3845 * for: int-to-float, float-to-int 3846 */ 3847 /* unop vA, vB */ 3848 mov r3, rINST, lsr #12 @ r3<- B 3849 mov r9, rINST, lsr #8 @ r9<- A+ 3850 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3851 flds s0, [r3] @ s0<- vB 3852 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3853 and r9, r9, #15 @ r9<- A 3854 ftosizs s1, s0 @ s1<- op 3855 GET_INST_OPCODE(ip) @ extract opcode from rINST 3856 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3857 fsts s1, [r9] @ vA<- s1 3858 GOTO_OPCODE(ip) @ jump to next instruction 3859 3860 3861/* ------------------------------ */ 3862 .balign 64 3863.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3864/* File: armv5te/OP_FLOAT_TO_LONG.S */ 3865@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3866/* File: armv5te/unopWider.S */ 3867 /* 3868 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3869 * that specifies an instruction that performs "result = op r0", where 3870 * "result" is a 64-bit quantity in r0/r1. 
3871 * 3872 * For: int-to-long, int-to-double, float-to-long, float-to-double 3873 */ 3874 /* unop vA, vB */ 3875 mov r9, rINST, lsr #8 @ r9<- A+ 3876 mov r3, rINST, lsr #12 @ r3<- B 3877 and r9, r9, #15 3878 GET_VREG(r0, r3) @ r0<- vB 3879 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3880 @ optional op; may set condition codes 3881 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3882 bl f2l_doconv @ r0<- op, r0-r3 changed 3883 GET_INST_OPCODE(ip) @ extract opcode from rINST 3884 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3885 GOTO_OPCODE(ip) @ jump to next instruction 3886 /* 10-11 instructions */ 3887 3888 3889 3890/* ------------------------------ */ 3891 .balign 64 3892.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3893/* File: vfp/OP_FLOAT_TO_DOUBLE.S */ 3894/* File: vfp/funopWider.S */ 3895 /* 3896 * Generic 32bit-to-64bit floating point unary operation. Provide an 3897 * "instr" line that specifies an instruction that performs "d0 = op s0". 3898 * 3899 * For: int-to-double, float-to-double 3900 */ 3901 /* unop vA, vB */ 3902 mov r3, rINST, lsr #12 @ r3<- B 3903 mov r9, rINST, lsr #8 @ r9<- A+ 3904 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3905 flds s0, [r3] @ s0<- vB 3906 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3907 and r9, r9, #15 @ r9<- A 3908 fcvtds d0, s0 @ d0<- op 3909 GET_INST_OPCODE(ip) @ extract opcode from rINST 3910 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3911 fstd d0, [r9] @ vA<- d0 3912 GOTO_OPCODE(ip) @ jump to next instruction 3913 3914 3915/* ------------------------------ */ 3916 .balign 64 3917.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3918/* File: vfp/OP_DOUBLE_TO_INT.S */ 3919/* File: vfp/funopNarrower.S */ 3920 /* 3921 * Generic 64bit-to-32bit unary floating point operation. Provide an 3922 * "instr" line that specifies an instruction that performs "s0 = op d0". 
3923 * 3924 * For: double-to-int, double-to-float 3925 */ 3926 /* unop vA, vB */ 3927 mov r3, rINST, lsr #12 @ r3<- B 3928 mov r9, rINST, lsr #8 @ r9<- A+ 3929 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3930 fldd d0, [r3] @ d0<- vB 3931 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3932 and r9, r9, #15 @ r9<- A 3933 ftosizd s0, d0 @ s0<- op 3934 GET_INST_OPCODE(ip) @ extract opcode from rINST 3935 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3936 fsts s0, [r9] @ vA<- s0 3937 GOTO_OPCODE(ip) @ jump to next instruction 3938 3939 3940/* ------------------------------ */ 3941 .balign 64 3942.L_OP_DOUBLE_TO_LONG: /* 0x8b */ 3943/* File: armv5te/OP_DOUBLE_TO_LONG.S */ 3944@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"} 3945/* File: armv5te/unopWide.S */ 3946 /* 3947 * Generic 64-bit unary operation. Provide an "instr" line that 3948 * specifies an instruction that performs "result = op r0/r1". 3949 * This could be an ARM instruction or a function call. 3950 * 3951 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3952 */ 3953 /* unop vA, vB */ 3954 mov r9, rINST, lsr #8 @ r9<- A+ 3955 mov r3, rINST, lsr #12 @ r3<- B 3956 and r9, r9, #15 3957 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3958 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3959 ldmia r3, {r0-r1} @ r0/r1<- vAA 3960 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3961 @ optional op; may set condition codes 3962 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 3963 GET_INST_OPCODE(ip) @ extract opcode from rINST 3964 stmia r9, {r0-r1} @ vAA<- r0/r1 3965 GOTO_OPCODE(ip) @ jump to next instruction 3966 /* 12-13 instructions */ 3967 3968 3969 3970 3971/* ------------------------------ */ 3972 .balign 64 3973.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 3974/* File: vfp/OP_DOUBLE_TO_FLOAT.S */ 3975/* File: vfp/funopNarrower.S */ 3976 /* 3977 * Generic 64bit-to-32bit unary floating point operation. Provide an 3978 * "instr" line that specifies an instruction that performs "s0 = op d0". 
3979 * 3980 * For: double-to-int, double-to-float 3981 */ 3982 /* unop vA, vB */ 3983 mov r3, rINST, lsr #12 @ r3<- B 3984 mov r9, rINST, lsr #8 @ r9<- A+ 3985 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3986 fldd d0, [r3] @ d0<- vB 3987 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3988 and r9, r9, #15 @ r9<- A 3989 fcvtsd s0, d0 @ s0<- op 3990 GET_INST_OPCODE(ip) @ extract opcode from rINST 3991 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3992 fsts s0, [r9] @ vA<- s0 3993 GOTO_OPCODE(ip) @ jump to next instruction 3994 3995 3996/* ------------------------------ */ 3997 .balign 64 3998.L_OP_INT_TO_BYTE: /* 0x8d */ 3999/* File: armv5te/OP_INT_TO_BYTE.S */ 4000/* File: armv5te/unop.S */ 4001 /* 4002 * Generic 32-bit unary operation. Provide an "instr" line that 4003 * specifies an instruction that performs "result = op r0". 4004 * This could be an ARM instruction or a function call. 4005 * 4006 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4007 * int-to-byte, int-to-char, int-to-short 4008 */ 4009 /* unop vA, vB */ 4010 mov r3, rINST, lsr #12 @ r3<- B 4011 mov r9, rINST, lsr #8 @ r9<- A+ 4012 GET_VREG(r0, r3) @ r0<- vB 4013 and r9, r9, #15 4014 mov r0, r0, asl #24 @ optional op; may set condition codes 4015 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4016 mov r0, r0, asr #24 @ r0<- op, r0-r3 changed 4017 GET_INST_OPCODE(ip) @ extract opcode from rINST 4018 SET_VREG(r0, r9) @ vAA<- r0 4019 GOTO_OPCODE(ip) @ jump to next instruction 4020 /* 9-10 instructions */ 4021 4022 4023/* ------------------------------ */ 4024 .balign 64 4025.L_OP_INT_TO_CHAR: /* 0x8e */ 4026/* File: armv5te/OP_INT_TO_CHAR.S */ 4027/* File: armv5te/unop.S */ 4028 /* 4029 * Generic 32-bit unary operation. Provide an "instr" line that 4030 * specifies an instruction that performs "result = op r0". 4031 * This could be an ARM instruction or a function call. 
4032 * 4033 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4034 * int-to-byte, int-to-char, int-to-short 4035 */ 4036 /* unop vA, vB */ 4037 mov r3, rINST, lsr #12 @ r3<- B 4038 mov r9, rINST, lsr #8 @ r9<- A+ 4039 GET_VREG(r0, r3) @ r0<- vB 4040 and r9, r9, #15 4041 mov r0, r0, asl #16 @ optional op; may set condition codes 4042 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4043 mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed 4044 GET_INST_OPCODE(ip) @ extract opcode from rINST 4045 SET_VREG(r0, r9) @ vAA<- r0 4046 GOTO_OPCODE(ip) @ jump to next instruction 4047 /* 9-10 instructions */ 4048 4049 4050/* ------------------------------ */ 4051 .balign 64 4052.L_OP_INT_TO_SHORT: /* 0x8f */ 4053/* File: armv5te/OP_INT_TO_SHORT.S */ 4054/* File: armv5te/unop.S */ 4055 /* 4056 * Generic 32-bit unary operation. Provide an "instr" line that 4057 * specifies an instruction that performs "result = op r0". 4058 * This could be an ARM instruction or a function call. 4059 * 4060 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4061 * int-to-byte, int-to-char, int-to-short 4062 */ 4063 /* unop vA, vB */ 4064 mov r3, rINST, lsr #12 @ r3<- B 4065 mov r9, rINST, lsr #8 @ r9<- A+ 4066 GET_VREG(r0, r3) @ r0<- vB 4067 and r9, r9, #15 4068 mov r0, r0, asl #16 @ optional op; may set condition codes 4069 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4070 mov r0, r0, asr #16 @ r0<- op, r0-r3 changed 4071 GET_INST_OPCODE(ip) @ extract opcode from rINST 4072 SET_VREG(r0, r9) @ vAA<- r0 4073 GOTO_OPCODE(ip) @ jump to next instruction 4074 /* 9-10 instructions */ 4075 4076 4077/* ------------------------------ */ 4078 .balign 64 4079.L_OP_ADD_INT: /* 0x90 */ 4080/* File: armv5te/OP_ADD_INT.S */ 4081/* File: armv5te/binop.S */ 4082 /* 4083 * Generic 32-bit binary operation. Provide an "instr" line that 4084 * specifies an instruction that performs "result = r0 op r1". 4085 * This could be an ARM instruction or a function call. 
(If the result 4086 * comes back in a register other than r0, you can override "result".) 4087 * 4088 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4089 * vCC (r1). Useful for integer division and modulus. Note that we 4090 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4091 * handles it correctly. 4092 * 4093 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4094 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4095 * mul-float, div-float, rem-float 4096 */ 4097 /* binop vAA, vBB, vCC */ 4098 FETCH(r0, 1) @ r0<- CCBB 4099 mov r9, rINST, lsr #8 @ r9<- AA 4100 mov r3, r0, lsr #8 @ r3<- CC 4101 and r2, r0, #255 @ r2<- BB 4102 GET_VREG(r1, r3) @ r1<- vCC 4103 GET_VREG(r0, r2) @ r0<- vBB 4104 .if 0 4105 cmp r1, #0 @ is second operand zero? 4106 beq common_errDivideByZero 4107 .endif 4108 4109 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4110 @ optional op; may set condition codes 4111 add r0, r0, r1 @ r0<- op, r0-r3 changed 4112 GET_INST_OPCODE(ip) @ extract opcode from rINST 4113 SET_VREG(r0, r9) @ vAA<- r0 4114 GOTO_OPCODE(ip) @ jump to next instruction 4115 /* 11-14 instructions */ 4116 4117 4118 4119/* ------------------------------ */ 4120 .balign 64 4121.L_OP_SUB_INT: /* 0x91 */ 4122/* File: armv5te/OP_SUB_INT.S */ 4123/* File: armv5te/binop.S */ 4124 /* 4125 * Generic 32-bit binary operation. Provide an "instr" line that 4126 * specifies an instruction that performs "result = r0 op r1". 4127 * This could be an ARM instruction or a function call. (If the result 4128 * comes back in a register other than r0, you can override "result".) 4129 * 4130 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4131 * vCC (r1). Useful for integer division and modulus. Note that we 4132 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4133 * handles it correctly. 
4134 * 4135 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4136 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4137 * mul-float, div-float, rem-float 4138 */ 4139 /* binop vAA, vBB, vCC */ 4140 FETCH(r0, 1) @ r0<- CCBB 4141 mov r9, rINST, lsr #8 @ r9<- AA 4142 mov r3, r0, lsr #8 @ r3<- CC 4143 and r2, r0, #255 @ r2<- BB 4144 GET_VREG(r1, r3) @ r1<- vCC 4145 GET_VREG(r0, r2) @ r0<- vBB 4146 .if 0 4147 cmp r1, #0 @ is second operand zero? 4148 beq common_errDivideByZero 4149 .endif 4150 4151 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4152 @ optional op; may set condition codes 4153 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4154 GET_INST_OPCODE(ip) @ extract opcode from rINST 4155 SET_VREG(r0, r9) @ vAA<- r0 4156 GOTO_OPCODE(ip) @ jump to next instruction 4157 /* 11-14 instructions */ 4158 4159 4160 4161/* ------------------------------ */ 4162 .balign 64 4163.L_OP_MUL_INT: /* 0x92 */ 4164/* File: armv5te/OP_MUL_INT.S */ 4165/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4166/* File: armv5te/binop.S */ 4167 /* 4168 * Generic 32-bit binary operation. Provide an "instr" line that 4169 * specifies an instruction that performs "result = r0 op r1". 4170 * This could be an ARM instruction or a function call. (If the result 4171 * comes back in a register other than r0, you can override "result".) 4172 * 4173 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4174 * vCC (r1). Useful for integer division and modulus. Note that we 4175 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4176 * handles it correctly. 
4177 * 4178 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4179 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4180 * mul-float, div-float, rem-float 4181 */ 4182 /* binop vAA, vBB, vCC */ 4183 FETCH(r0, 1) @ r0<- CCBB 4184 mov r9, rINST, lsr #8 @ r9<- AA 4185 mov r3, r0, lsr #8 @ r3<- CC 4186 and r2, r0, #255 @ r2<- BB 4187 GET_VREG(r1, r3) @ r1<- vCC 4188 GET_VREG(r0, r2) @ r0<- vBB 4189 .if 0 4190 cmp r1, #0 @ is second operand zero? 4191 beq common_errDivideByZero 4192 .endif 4193 4194 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4195 @ optional op; may set condition codes 4196 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4197 GET_INST_OPCODE(ip) @ extract opcode from rINST 4198 SET_VREG(r0, r9) @ vAA<- r0 4199 GOTO_OPCODE(ip) @ jump to next instruction 4200 /* 11-14 instructions */ 4201 4202 4203 4204/* ------------------------------ */ 4205 .balign 64 4206.L_OP_DIV_INT: /* 0x93 */ 4207/* File: armv5te/OP_DIV_INT.S */ 4208/* File: armv5te/binop.S */ 4209 /* 4210 * Generic 32-bit binary operation. Provide an "instr" line that 4211 * specifies an instruction that performs "result = r0 op r1". 4212 * This could be an ARM instruction or a function call. (If the result 4213 * comes back in a register other than r0, you can override "result".) 4214 * 4215 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4216 * vCC (r1). Useful for integer division and modulus. Note that we 4217 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4218 * handles it correctly. 
4219 * 4220 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4221 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4222 * mul-float, div-float, rem-float 4223 */ 4224 /* binop vAA, vBB, vCC */ 4225 FETCH(r0, 1) @ r0<- CCBB 4226 mov r9, rINST, lsr #8 @ r9<- AA 4227 mov r3, r0, lsr #8 @ r3<- CC 4228 and r2, r0, #255 @ r2<- BB 4229 GET_VREG(r1, r3) @ r1<- vCC 4230 GET_VREG(r0, r2) @ r0<- vBB 4231 .if 1 4232 cmp r1, #0 @ is second operand zero? 4233 beq common_errDivideByZero 4234 .endif 4235 4236 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4237 @ optional op; may set condition codes 4238 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4239 GET_INST_OPCODE(ip) @ extract opcode from rINST 4240 SET_VREG(r0, r9) @ vAA<- r0 4241 GOTO_OPCODE(ip) @ jump to next instruction 4242 /* 11-14 instructions */ 4243 4244 4245 4246/* ------------------------------ */ 4247 .balign 64 4248.L_OP_REM_INT: /* 0x94 */ 4249/* File: armv5te/OP_REM_INT.S */ 4250/* idivmod returns quotient in r0 and remainder in r1 */ 4251/* File: armv5te/binop.S */ 4252 /* 4253 * Generic 32-bit binary operation. Provide an "instr" line that 4254 * specifies an instruction that performs "result = r0 op r1". 4255 * This could be an ARM instruction or a function call. (If the result 4256 * comes back in a register other than r0, you can override "result".) 4257 * 4258 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4259 * vCC (r1). Useful for integer division and modulus. Note that we 4260 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4261 * handles it correctly. 
4262 * 4263 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4264 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4265 * mul-float, div-float, rem-float 4266 */ 4267 /* binop vAA, vBB, vCC */ 4268 FETCH(r0, 1) @ r0<- CCBB 4269 mov r9, rINST, lsr #8 @ r9<- AA 4270 mov r3, r0, lsr #8 @ r3<- CC 4271 and r2, r0, #255 @ r2<- BB 4272 GET_VREG(r1, r3) @ r1<- vCC 4273 GET_VREG(r0, r2) @ r0<- vBB 4274 .if 1 4275 cmp r1, #0 @ is second operand zero? 4276 beq common_errDivideByZero 4277 .endif 4278 4279 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4280 @ optional op; may set condition codes 4281 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4282 GET_INST_OPCODE(ip) @ extract opcode from rINST 4283 SET_VREG(r1, r9) @ vAA<- r1 4284 GOTO_OPCODE(ip) @ jump to next instruction 4285 /* 11-14 instructions */ 4286 4287 4288 4289/* ------------------------------ */ 4290 .balign 64 4291.L_OP_AND_INT: /* 0x95 */ 4292/* File: armv5te/OP_AND_INT.S */ 4293/* File: armv5te/binop.S */ 4294 /* 4295 * Generic 32-bit binary operation. Provide an "instr" line that 4296 * specifies an instruction that performs "result = r0 op r1". 4297 * This could be an ARM instruction or a function call. (If the result 4298 * comes back in a register other than r0, you can override "result".) 4299 * 4300 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4301 * vCC (r1). Useful for integer division and modulus. Note that we 4302 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4303 * handles it correctly. 
4304 * 4305 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4306 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4307 * mul-float, div-float, rem-float 4308 */ 4309 /* binop vAA, vBB, vCC */ 4310 FETCH(r0, 1) @ r0<- CCBB 4311 mov r9, rINST, lsr #8 @ r9<- AA 4312 mov r3, r0, lsr #8 @ r3<- CC 4313 and r2, r0, #255 @ r2<- BB 4314 GET_VREG(r1, r3) @ r1<- vCC 4315 GET_VREG(r0, r2) @ r0<- vBB 4316 .if 0 4317 cmp r1, #0 @ is second operand zero? 4318 beq common_errDivideByZero 4319 .endif 4320 4321 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4322 @ optional op; may set condition codes 4323 and r0, r0, r1 @ r0<- op, r0-r3 changed 4324 GET_INST_OPCODE(ip) @ extract opcode from rINST 4325 SET_VREG(r0, r9) @ vAA<- r0 4326 GOTO_OPCODE(ip) @ jump to next instruction 4327 /* 11-14 instructions */ 4328 4329 4330 4331/* ------------------------------ */ 4332 .balign 64 4333.L_OP_OR_INT: /* 0x96 */ 4334/* File: armv5te/OP_OR_INT.S */ 4335/* File: armv5te/binop.S */ 4336 /* 4337 * Generic 32-bit binary operation. Provide an "instr" line that 4338 * specifies an instruction that performs "result = r0 op r1". 4339 * This could be an ARM instruction or a function call. (If the result 4340 * comes back in a register other than r0, you can override "result".) 4341 * 4342 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4343 * vCC (r1). Useful for integer division and modulus. Note that we 4344 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4345 * handles it correctly. 
4346 * 4347 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4348 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4349 * mul-float, div-float, rem-float 4350 */ 4351 /* binop vAA, vBB, vCC */ 4352 FETCH(r0, 1) @ r0<- CCBB 4353 mov r9, rINST, lsr #8 @ r9<- AA 4354 mov r3, r0, lsr #8 @ r3<- CC 4355 and r2, r0, #255 @ r2<- BB 4356 GET_VREG(r1, r3) @ r1<- vCC 4357 GET_VREG(r0, r2) @ r0<- vBB 4358 .if 0 4359 cmp r1, #0 @ is second operand zero? 4360 beq common_errDivideByZero 4361 .endif 4362 4363 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4364 @ optional op; may set condition codes 4365 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4366 GET_INST_OPCODE(ip) @ extract opcode from rINST 4367 SET_VREG(r0, r9) @ vAA<- r0 4368 GOTO_OPCODE(ip) @ jump to next instruction 4369 /* 11-14 instructions */ 4370 4371 4372 4373/* ------------------------------ */ 4374 .balign 64 4375.L_OP_XOR_INT: /* 0x97 */ 4376/* File: armv5te/OP_XOR_INT.S */ 4377/* File: armv5te/binop.S */ 4378 /* 4379 * Generic 32-bit binary operation. Provide an "instr" line that 4380 * specifies an instruction that performs "result = r0 op r1". 4381 * This could be an ARM instruction or a function call. (If the result 4382 * comes back in a register other than r0, you can override "result".) 4383 * 4384 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4385 * vCC (r1). Useful for integer division and modulus. Note that we 4386 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4387 * handles it correctly. 
4388 * 4389 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4390 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4391 * mul-float, div-float, rem-float 4392 */ 4393 /* binop vAA, vBB, vCC */ 4394 FETCH(r0, 1) @ r0<- CCBB 4395 mov r9, rINST, lsr #8 @ r9<- AA 4396 mov r3, r0, lsr #8 @ r3<- CC 4397 and r2, r0, #255 @ r2<- BB 4398 GET_VREG(r1, r3) @ r1<- vCC 4399 GET_VREG(r0, r2) @ r0<- vBB 4400 .if 0 4401 cmp r1, #0 @ is second operand zero? 4402 beq common_errDivideByZero 4403 .endif 4404 4405 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4406 @ optional op; may set condition codes 4407 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4408 GET_INST_OPCODE(ip) @ extract opcode from rINST 4409 SET_VREG(r0, r9) @ vAA<- r0 4410 GOTO_OPCODE(ip) @ jump to next instruction 4411 /* 11-14 instructions */ 4412 4413 4414 4415/* ------------------------------ */ 4416 .balign 64 4417.L_OP_SHL_INT: /* 0x98 */ 4418/* File: armv5te/OP_SHL_INT.S */ 4419/* File: armv5te/binop.S */ 4420 /* 4421 * Generic 32-bit binary operation. Provide an "instr" line that 4422 * specifies an instruction that performs "result = r0 op r1". 4423 * This could be an ARM instruction or a function call. (If the result 4424 * comes back in a register other than r0, you can override "result".) 4425 * 4426 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4427 * vCC (r1). Useful for integer division and modulus. Note that we 4428 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4429 * handles it correctly. 
4430 * 4431 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4432 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4433 * mul-float, div-float, rem-float 4434 */ 4435 /* binop vAA, vBB, vCC */ 4436 FETCH(r0, 1) @ r0<- CCBB 4437 mov r9, rINST, lsr #8 @ r9<- AA 4438 mov r3, r0, lsr #8 @ r3<- CC 4439 and r2, r0, #255 @ r2<- BB 4440 GET_VREG(r1, r3) @ r1<- vCC 4441 GET_VREG(r0, r2) @ r0<- vBB 4442 .if 0 4443 cmp r1, #0 @ is second operand zero? 4444 beq common_errDivideByZero 4445 .endif 4446 4447 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4448 and r1, r1, #31 @ optional op; may set condition codes 4449 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 4450 GET_INST_OPCODE(ip) @ extract opcode from rINST 4451 SET_VREG(r0, r9) @ vAA<- r0 4452 GOTO_OPCODE(ip) @ jump to next instruction 4453 /* 11-14 instructions */ 4454 4455 4456 4457/* ------------------------------ */ 4458 .balign 64 4459.L_OP_SHR_INT: /* 0x99 */ 4460/* File: armv5te/OP_SHR_INT.S */ 4461/* File: armv5te/binop.S */ 4462 /* 4463 * Generic 32-bit binary operation. Provide an "instr" line that 4464 * specifies an instruction that performs "result = r0 op r1". 4465 * This could be an ARM instruction or a function call. (If the result 4466 * comes back in a register other than r0, you can override "result".) 4467 * 4468 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4469 * vCC (r1). Useful for integer division and modulus. Note that we 4470 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4471 * handles it correctly. 
4472 * 4473 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4474 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4475 * mul-float, div-float, rem-float 4476 */ 4477 /* binop vAA, vBB, vCC */ 4478 FETCH(r0, 1) @ r0<- CCBB 4479 mov r9, rINST, lsr #8 @ r9<- AA 4480 mov r3, r0, lsr #8 @ r3<- CC 4481 and r2, r0, #255 @ r2<- BB 4482 GET_VREG(r1, r3) @ r1<- vCC 4483 GET_VREG(r0, r2) @ r0<- vBB 4484 .if 0 4485 cmp r1, #0 @ is second operand zero? 4486 beq common_errDivideByZero 4487 .endif 4488 4489 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4490 and r1, r1, #31 @ optional op; may set condition codes 4491 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 4492 GET_INST_OPCODE(ip) @ extract opcode from rINST 4493 SET_VREG(r0, r9) @ vAA<- r0 4494 GOTO_OPCODE(ip) @ jump to next instruction 4495 /* 11-14 instructions */ 4496 4497 4498 4499/* ------------------------------ */ 4500 .balign 64 4501.L_OP_USHR_INT: /* 0x9a */ 4502/* File: armv5te/OP_USHR_INT.S */ 4503/* File: armv5te/binop.S */ 4504 /* 4505 * Generic 32-bit binary operation. Provide an "instr" line that 4506 * specifies an instruction that performs "result = r0 op r1". 4507 * This could be an ARM instruction or a function call. (If the result 4508 * comes back in a register other than r0, you can override "result".) 4509 * 4510 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4511 * vCC (r1). Useful for integer division and modulus. Note that we 4512 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4513 * handles it correctly. 
4514 * 4515 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4516 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4517 * mul-float, div-float, rem-float 4518 */ 4519 /* binop vAA, vBB, vCC */ 4520 FETCH(r0, 1) @ r0<- CCBB 4521 mov r9, rINST, lsr #8 @ r9<- AA 4522 mov r3, r0, lsr #8 @ r3<- CC 4523 and r2, r0, #255 @ r2<- BB 4524 GET_VREG(r1, r3) @ r1<- vCC 4525 GET_VREG(r0, r2) @ r0<- vBB 4526 .if 0 4527 cmp r1, #0 @ is second operand zero? 4528 beq common_errDivideByZero 4529 .endif 4530 4531 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4532 and r1, r1, #31 @ optional op; may set condition codes 4533 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 4534 GET_INST_OPCODE(ip) @ extract opcode from rINST 4535 SET_VREG(r0, r9) @ vAA<- r0 4536 GOTO_OPCODE(ip) @ jump to next instruction 4537 /* 11-14 instructions */ 4538 4539 4540 4541/* ------------------------------ */ 4542 .balign 64 4543.L_OP_ADD_LONG: /* 0x9b */ 4544/* File: armv5te/OP_ADD_LONG.S */ 4545/* File: armv5te/binopWide.S */ 4546 /* 4547 * Generic 64-bit binary operation. Provide an "instr" line that 4548 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4549 * This could be an ARM instruction or a function call. (If the result 4550 * comes back in a register other than r0, you can override "result".) 4551 * 4552 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4553 * vCC (r1). Useful for integer division and modulus. 4554 * 4555 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4556 * xor-long, add-double, sub-double, mul-double, div-double, 4557 * rem-double 4558 * 4559 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
4560 */ 4561 /* binop vAA, vBB, vCC */ 4562 FETCH(r0, 1) @ r0<- CCBB 4563 mov r9, rINST, lsr #8 @ r9<- AA 4564 and r2, r0, #255 @ r2<- BB 4565 mov r3, r0, lsr #8 @ r3<- CC 4566 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4567 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4568 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4569 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4570 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4571 .if 0 4572 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4573 beq common_errDivideByZero 4574 .endif 4575 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4576 4577 adds r0, r0, r2 @ optional op; may set condition codes 4578 adc r1, r1, r3 @ result<- op, r0-r3 changed 4579 GET_INST_OPCODE(ip) @ extract opcode from rINST 4580 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4581 GOTO_OPCODE(ip) @ jump to next instruction 4582 /* 14-17 instructions */ 4583 4584 4585 4586/* ------------------------------ */ 4587 .balign 64 4588.L_OP_SUB_LONG: /* 0x9c */ 4589/* File: armv5te/OP_SUB_LONG.S */ 4590/* File: armv5te/binopWide.S */ 4591 /* 4592 * Generic 64-bit binary operation. Provide an "instr" line that 4593 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4594 * This could be an ARM instruction or a function call. (If the result 4595 * comes back in a register other than r0, you can override "result".) 4596 * 4597 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4598 * vCC (r1). Useful for integer division and modulus. 4599 * 4600 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4601 * xor-long, add-double, sub-double, mul-double, div-double, 4602 * rem-double 4603 * 4604 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
4605 */ 4606 /* binop vAA, vBB, vCC */ 4607 FETCH(r0, 1) @ r0<- CCBB 4608 mov r9, rINST, lsr #8 @ r9<- AA 4609 and r2, r0, #255 @ r2<- BB 4610 mov r3, r0, lsr #8 @ r3<- CC 4611 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4612 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4613 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4614 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4615 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4616 .if 0 4617 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4618 beq common_errDivideByZero 4619 .endif 4620 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4621 4622 subs r0, r0, r2 @ optional op; may set condition codes 4623 sbc r1, r1, r3 @ result<- op, r0-r3 changed 4624 GET_INST_OPCODE(ip) @ extract opcode from rINST 4625 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4626 GOTO_OPCODE(ip) @ jump to next instruction 4627 /* 14-17 instructions */ 4628 4629 4630 4631/* ------------------------------ */ 4632 .balign 64 4633.L_OP_MUL_LONG: /* 0x9d */ 4634/* File: armv5te/OP_MUL_LONG.S */ 4635 /* 4636 * Signed 64-bit integer multiply. 4637 * 4638 * Consider WXxYZ (r1r0 x r3r2) with a long multiply: 4639 * WX 4640 * x YZ 4641 * -------- 4642 * ZW ZX 4643 * YW YX 4644 * 4645 * The low word of the result holds ZX, the high word holds 4646 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because 4647 * it doesn't fit in the low 64 bits. 4648 * 4649 * Unlike most ARM math operations, multiply instructions have 4650 * restrictions on using the same register more than once (Rd and Rm 4651 * cannot be the same). 
4652 */ 4653 /* mul-long vAA, vBB, vCC */ 4654 FETCH(r0, 1) @ r0<- CCBB 4655 and r2, r0, #255 @ r2<- BB 4656 mov r3, r0, lsr #8 @ r3<- CC 4657 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4658 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4659 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4660 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4661 mul ip, r2, r1 @ ip<- ZxW 4662 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 4663 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 4664 mov r0, rINST, lsr #8 @ r0<- AA 4665 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 4666 add r0, rFP, r0, lsl #2 @ r0<- &fp[AA] 4667 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4668 b .LOP_MUL_LONG_finish 4669 4670/* ------------------------------ */ 4671 .balign 64 4672.L_OP_DIV_LONG: /* 0x9e */ 4673/* File: armv5te/OP_DIV_LONG.S */ 4674/* File: armv5te/binopWide.S */ 4675 /* 4676 * Generic 64-bit binary operation. Provide an "instr" line that 4677 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4678 * This could be an ARM instruction or a function call. (If the result 4679 * comes back in a register other than r0, you can override "result".) 4680 * 4681 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4682 * vCC (r1). Useful for integer division and modulus. 4683 * 4684 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4685 * xor-long, add-double, sub-double, mul-double, div-double, 4686 * rem-double 4687 * 4688 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4689 */ 4690 /* binop vAA, vBB, vCC */ 4691 FETCH(r0, 1) @ r0<- CCBB 4692 mov r9, rINST, lsr #8 @ r9<- AA 4693 and r2, r0, #255 @ r2<- BB 4694 mov r3, r0, lsr #8 @ r3<- CC 4695 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4696 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4697 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4698 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4699 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4700 .if 1 4701 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
4702 beq common_errDivideByZero 4703 .endif 4704 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4705 4706 @ optional op; may set condition codes 4707 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4708 GET_INST_OPCODE(ip) @ extract opcode from rINST 4709 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4710 GOTO_OPCODE(ip) @ jump to next instruction 4711 /* 14-17 instructions */ 4712 4713 4714 4715/* ------------------------------ */ 4716 .balign 64 4717.L_OP_REM_LONG: /* 0x9f */ 4718/* File: armv5te/OP_REM_LONG.S */ 4719/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 4720/* File: armv5te/binopWide.S */ 4721 /* 4722 * Generic 64-bit binary operation. Provide an "instr" line that 4723 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4724 * This could be an ARM instruction or a function call. (If the result 4725 * comes back in a register other than r0, you can override "result".) 4726 * 4727 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4728 * vCC (r1). Useful for integer division and modulus. 4729 * 4730 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4731 * xor-long, add-double, sub-double, mul-double, div-double, 4732 * rem-double 4733 * 4734 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4735 */ 4736 /* binop vAA, vBB, vCC */ 4737 FETCH(r0, 1) @ r0<- CCBB 4738 mov r9, rINST, lsr #8 @ r9<- AA 4739 and r2, r0, #255 @ r2<- BB 4740 mov r3, r0, lsr #8 @ r3<- CC 4741 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4742 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4743 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4744 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4745 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4746 .if 1 4747 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
4748 beq common_errDivideByZero 4749 .endif 4750 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4751 4752 @ optional op; may set condition codes 4753 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4754 GET_INST_OPCODE(ip) @ extract opcode from rINST 4755 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 4756 GOTO_OPCODE(ip) @ jump to next instruction 4757 /* 14-17 instructions */ 4758 4759 4760 4761/* ------------------------------ */ 4762 .balign 64 4763.L_OP_AND_LONG: /* 0xa0 */ 4764/* File: armv5te/OP_AND_LONG.S */ 4765/* File: armv5te/binopWide.S */ 4766 /* 4767 * Generic 64-bit binary operation. Provide an "instr" line that 4768 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4769 * This could be an ARM instruction or a function call. (If the result 4770 * comes back in a register other than r0, you can override "result".) 4771 * 4772 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4773 * vCC (r1). Useful for integer division and modulus. 4774 * 4775 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4776 * xor-long, add-double, sub-double, mul-double, div-double, 4777 * rem-double 4778 * 4779 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4780 */ 4781 /* binop vAA, vBB, vCC */ 4782 FETCH(r0, 1) @ r0<- CCBB 4783 mov r9, rINST, lsr #8 @ r9<- AA 4784 and r2, r0, #255 @ r2<- BB 4785 mov r3, r0, lsr #8 @ r3<- CC 4786 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4787 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4788 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4789 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4790 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4791 .if 0 4792 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
4793 beq common_errDivideByZero 4794 .endif 4795 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4796 4797 and r0, r0, r2 @ optional op; may set condition codes 4798 and r1, r1, r3 @ result<- op, r0-r3 changed 4799 GET_INST_OPCODE(ip) @ extract opcode from rINST 4800 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4801 GOTO_OPCODE(ip) @ jump to next instruction 4802 /* 14-17 instructions */ 4803 4804 4805 4806/* ------------------------------ */ 4807 .balign 64 4808.L_OP_OR_LONG: /* 0xa1 */ 4809/* File: armv5te/OP_OR_LONG.S */ 4810/* File: armv5te/binopWide.S */ 4811 /* 4812 * Generic 64-bit binary operation. Provide an "instr" line that 4813 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4814 * This could be an ARM instruction or a function call. (If the result 4815 * comes back in a register other than r0, you can override "result".) 4816 * 4817 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4818 * vCC (r1). Useful for integer division and modulus. 4819 * 4820 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4821 * xor-long, add-double, sub-double, mul-double, div-double, 4822 * rem-double 4823 * 4824 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4825 */ 4826 /* binop vAA, vBB, vCC */ 4827 FETCH(r0, 1) @ r0<- CCBB 4828 mov r9, rINST, lsr #8 @ r9<- AA 4829 and r2, r0, #255 @ r2<- BB 4830 mov r3, r0, lsr #8 @ r3<- CC 4831 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4832 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4833 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4834 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4835 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4836 .if 0 4837 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
4838 beq common_errDivideByZero 4839 .endif 4840 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4841 4842 orr r0, r0, r2 @ optional op; may set condition codes 4843 orr r1, r1, r3 @ result<- op, r0-r3 changed 4844 GET_INST_OPCODE(ip) @ extract opcode from rINST 4845 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4846 GOTO_OPCODE(ip) @ jump to next instruction 4847 /* 14-17 instructions */ 4848 4849 4850 4851/* ------------------------------ */ 4852 .balign 64 4853.L_OP_XOR_LONG: /* 0xa2 */ 4854/* File: armv5te/OP_XOR_LONG.S */ 4855/* File: armv5te/binopWide.S */ 4856 /* 4857 * Generic 64-bit binary operation. Provide an "instr" line that 4858 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4859 * This could be an ARM instruction or a function call. (If the result 4860 * comes back in a register other than r0, you can override "result".) 4861 * 4862 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4863 * vCC (r1). Useful for integer division and modulus. 4864 * 4865 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4866 * xor-long, add-double, sub-double, mul-double, div-double, 4867 * rem-double 4868 * 4869 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4870 */ 4871 /* binop vAA, vBB, vCC */ 4872 FETCH(r0, 1) @ r0<- CCBB 4873 mov r9, rINST, lsr #8 @ r9<- AA 4874 and r2, r0, #255 @ r2<- BB 4875 mov r3, r0, lsr #8 @ r3<- CC 4876 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4877 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4878 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4879 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4880 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4881 .if 0 4882 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
4883 beq common_errDivideByZero 4884 .endif 4885 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4886 4887 eor r0, r0, r2 @ optional op; may set condition codes 4888 eor r1, r1, r3 @ result<- op, r0-r3 changed 4889 GET_INST_OPCODE(ip) @ extract opcode from rINST 4890 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4891 GOTO_OPCODE(ip) @ jump to next instruction 4892 /* 14-17 instructions */ 4893 4894 4895 4896/* ------------------------------ */ 4897 .balign 64 4898.L_OP_SHL_LONG: /* 0xa3 */ 4899/* File: armv5te/OP_SHL_LONG.S */ 4900 /* 4901 * Long integer shift. This is different from the generic 32/64-bit 4902 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4903 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4904 * 6 bits of the shift distance. 4905 */ 4906 /* shl-long vAA, vBB, vCC */ 4907 FETCH(r0, 1) @ r0<- CCBB 4908 mov r9, rINST, lsr #8 @ r9<- AA 4909 and r3, r0, #255 @ r3<- BB 4910 mov r0, r0, lsr #8 @ r0<- CC 4911 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 4912 GET_VREG(r2, r0) @ r2<- vCC 4913 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 4914 and r2, r2, #63 @ r2<- r2 & 0x3f 4915 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4916 4917 mov r1, r1, asl r2 @ r1<- r1 << r2 4918 rsb r3, r2, #32 @ r3<- 32 - r2 4919 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 4920 subs ip, r2, #32 @ ip<- r2 - 32 4921 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 4922 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4923 b .LOP_SHL_LONG_finish 4924 4925/* ------------------------------ */ 4926 .balign 64 4927.L_OP_SHR_LONG: /* 0xa4 */ 4928/* File: armv5te/OP_SHR_LONG.S */ 4929 /* 4930 * Long integer shift. This is different from the generic 32/64-bit 4931 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4932 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4933 * 6 bits of the shift distance. 
4934 */ 4935 /* shr-long vAA, vBB, vCC */ 4936 FETCH(r0, 1) @ r0<- CCBB 4937 mov r9, rINST, lsr #8 @ r9<- AA 4938 and r3, r0, #255 @ r3<- BB 4939 mov r0, r0, lsr #8 @ r0<- CC 4940 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 4941 GET_VREG(r2, r0) @ r2<- vCC 4942 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 4943 and r2, r2, #63 @ r0<- r0 & 0x3f 4944 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4945 4946 mov r0, r0, lsr r2 @ r0<- r2 >> r2 4947 rsb r3, r2, #32 @ r3<- 32 - r2 4948 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 4949 subs ip, r2, #32 @ ip<- r2 - 32 4950 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 4951 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4952 b .LOP_SHR_LONG_finish 4953 4954/* ------------------------------ */ 4955 .balign 64 4956.L_OP_USHR_LONG: /* 0xa5 */ 4957/* File: armv5te/OP_USHR_LONG.S */ 4958 /* 4959 * Long integer shift. This is different from the generic 32/64-bit 4960 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4961 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4962 * 6 bits of the shift distance. 4963 */ 4964 /* ushr-long vAA, vBB, vCC */ 4965 FETCH(r0, 1) @ r0<- CCBB 4966 mov r9, rINST, lsr #8 @ r9<- AA 4967 and r3, r0, #255 @ r3<- BB 4968 mov r0, r0, lsr #8 @ r0<- CC 4969 add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] 4970 GET_VREG(r2, r0) @ r2<- vCC 4971 ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 4972 and r2, r2, #63 @ r0<- r0 & 0x3f 4973 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4974 4975 mov r0, r0, lsr r2 @ r0<- r2 >> r2 4976 rsb r3, r2, #32 @ r3<- 32 - r2 4977 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 4978 subs ip, r2, #32 @ ip<- r2 - 32 4979 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 4980 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4981 b .LOP_USHR_LONG_finish 4982 4983/* ------------------------------ */ 4984 .balign 64 4985.L_OP_ADD_FLOAT: /* 0xa6 */ 4986/* File: vfp/OP_ADD_FLOAT.S */ 4987/* File: vfp/fbinop.S */ 4988 /* 4989 * Generic 32-bit floating-point operation. 
Provide an "instr" line that 4990 * specifies an instruction that performs "s2 = s0 op s1". Because we 4991 * use the "softfp" ABI, this must be an instruction, not a function call. 4992 * 4993 * For: add-float, sub-float, mul-float, div-float 4994 */ 4995 /* floatop vAA, vBB, vCC */ 4996 FETCH(r0, 1) @ r0<- CCBB 4997 mov r9, rINST, lsr #8 @ r9<- AA 4998 mov r3, r0, lsr #8 @ r3<- CC 4999 and r2, r0, #255 @ r2<- BB 5000 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5001 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5002 flds s1, [r3] @ s1<- vCC 5003 flds s0, [r2] @ s0<- vBB 5004 5005 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5006 fadds s2, s0, s1 @ s2<- op 5007 GET_INST_OPCODE(ip) @ extract opcode from rINST 5008 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5009 fsts s2, [r9] @ vAA<- s2 5010 GOTO_OPCODE(ip) @ jump to next instruction 5011 5012 5013/* ------------------------------ */ 5014 .balign 64 5015.L_OP_SUB_FLOAT: /* 0xa7 */ 5016/* File: vfp/OP_SUB_FLOAT.S */ 5017/* File: vfp/fbinop.S */ 5018 /* 5019 * Generic 32-bit floating-point operation. Provide an "instr" line that 5020 * specifies an instruction that performs "s2 = s0 op s1". Because we 5021 * use the "softfp" ABI, this must be an instruction, not a function call. 
5022 * 5023 * For: add-float, sub-float, mul-float, div-float 5024 */ 5025 /* floatop vAA, vBB, vCC */ 5026 FETCH(r0, 1) @ r0<- CCBB 5027 mov r9, rINST, lsr #8 @ r9<- AA 5028 mov r3, r0, lsr #8 @ r3<- CC 5029 and r2, r0, #255 @ r2<- BB 5030 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5031 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5032 flds s1, [r3] @ s1<- vCC 5033 flds s0, [r2] @ s0<- vBB 5034 5035 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5036 fsubs s2, s0, s1 @ s2<- op 5037 GET_INST_OPCODE(ip) @ extract opcode from rINST 5038 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5039 fsts s2, [r9] @ vAA<- s2 5040 GOTO_OPCODE(ip) @ jump to next instruction 5041 5042 5043/* ------------------------------ */ 5044 .balign 64 5045.L_OP_MUL_FLOAT: /* 0xa8 */ 5046/* File: vfp/OP_MUL_FLOAT.S */ 5047/* File: vfp/fbinop.S */ 5048 /* 5049 * Generic 32-bit floating-point operation. Provide an "instr" line that 5050 * specifies an instruction that performs "s2 = s0 op s1". Because we 5051 * use the "softfp" ABI, this must be an instruction, not a function call. 5052 * 5053 * For: add-float, sub-float, mul-float, div-float 5054 */ 5055 /* floatop vAA, vBB, vCC */ 5056 FETCH(r0, 1) @ r0<- CCBB 5057 mov r9, rINST, lsr #8 @ r9<- AA 5058 mov r3, r0, lsr #8 @ r3<- CC 5059 and r2, r0, #255 @ r2<- BB 5060 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5061 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5062 flds s1, [r3] @ s1<- vCC 5063 flds s0, [r2] @ s0<- vBB 5064 5065 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5066 fmuls s2, s0, s1 @ s2<- op 5067 GET_INST_OPCODE(ip) @ extract opcode from rINST 5068 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5069 fsts s2, [r9] @ vAA<- s2 5070 GOTO_OPCODE(ip) @ jump to next instruction 5071 5072 5073/* ------------------------------ */ 5074 .balign 64 5075.L_OP_DIV_FLOAT: /* 0xa9 */ 5076/* File: vfp/OP_DIV_FLOAT.S */ 5077/* File: vfp/fbinop.S */ 5078 /* 5079 * Generic 32-bit floating-point operation. 
Provide an "instr" line that 5080 * specifies an instruction that performs "s2 = s0 op s1". Because we 5081 * use the "softfp" ABI, this must be an instruction, not a function call. 5082 * 5083 * For: add-float, sub-float, mul-float, div-float 5084 */ 5085 /* floatop vAA, vBB, vCC */ 5086 FETCH(r0, 1) @ r0<- CCBB 5087 mov r9, rINST, lsr #8 @ r9<- AA 5088 mov r3, r0, lsr #8 @ r3<- CC 5089 and r2, r0, #255 @ r2<- BB 5090 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5091 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5092 flds s1, [r3] @ s1<- vCC 5093 flds s0, [r2] @ s0<- vBB 5094 5095 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5096 fdivs s2, s0, s1 @ s2<- op 5097 GET_INST_OPCODE(ip) @ extract opcode from rINST 5098 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5099 fsts s2, [r9] @ vAA<- s2 5100 GOTO_OPCODE(ip) @ jump to next instruction 5101 5102 5103/* ------------------------------ */ 5104 .balign 64 5105.L_OP_REM_FLOAT: /* 0xaa */ 5106/* File: armv5te/OP_REM_FLOAT.S */ 5107/* EABI doesn't define a float remainder function, but libm does */ 5108/* File: armv5te/binop.S */ 5109 /* 5110 * Generic 32-bit binary operation. Provide an "instr" line that 5111 * specifies an instruction that performs "result = r0 op r1". 5112 * This could be an ARM instruction or a function call. (If the result 5113 * comes back in a register other than r0, you can override "result".) 5114 * 5115 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5116 * vCC (r1). Useful for integer division and modulus. Note that we 5117 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5118 * handles it correctly. 
5119 * 5120 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5121 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5122 * mul-float, div-float, rem-float 5123 */ 5124 /* binop vAA, vBB, vCC */ 5125 FETCH(r0, 1) @ r0<- CCBB 5126 mov r9, rINST, lsr #8 @ r9<- AA 5127 mov r3, r0, lsr #8 @ r3<- CC 5128 and r2, r0, #255 @ r2<- BB 5129 GET_VREG(r1, r3) @ r1<- vCC 5130 GET_VREG(r0, r2) @ r0<- vBB 5131 .if 0 5132 cmp r1, #0 @ is second operand zero? 5133 beq common_errDivideByZero 5134 .endif 5135 5136 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5137 @ optional op; may set condition codes 5138 bl fmodf @ r0<- op, r0-r3 changed 5139 GET_INST_OPCODE(ip) @ extract opcode from rINST 5140 SET_VREG(r0, r9) @ vAA<- r0 5141 GOTO_OPCODE(ip) @ jump to next instruction 5142 /* 11-14 instructions */ 5143 5144 5145 5146/* ------------------------------ */ 5147 .balign 64 5148.L_OP_ADD_DOUBLE: /* 0xab */ 5149/* File: vfp/OP_ADD_DOUBLE.S */ 5150/* File: vfp/fbinopWide.S */ 5151 /* 5152 * Generic 64-bit double-precision floating point binary operation. 5153 * Provide an "instr" line that specifies an instruction that performs 5154 * "d2 = d0 op d1". 
5155 * 5156 * for: add-double, sub-double, mul-double, div-double 5157 */ 5158 /* doubleop vAA, vBB, vCC */ 5159 FETCH(r0, 1) @ r0<- CCBB 5160 mov r9, rINST, lsr #8 @ r9<- AA 5161 mov r3, r0, lsr #8 @ r3<- CC 5162 and r2, r0, #255 @ r2<- BB 5163 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5164 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5165 fldd d1, [r3] @ d1<- vCC 5166 fldd d0, [r2] @ d0<- vBB 5167 5168 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5169 faddd d2, d0, d1 @ s2<- op 5170 GET_INST_OPCODE(ip) @ extract opcode from rINST 5171 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5172 fstd d2, [r9] @ vAA<- d2 5173 GOTO_OPCODE(ip) @ jump to next instruction 5174 5175 5176/* ------------------------------ */ 5177 .balign 64 5178.L_OP_SUB_DOUBLE: /* 0xac */ 5179/* File: vfp/OP_SUB_DOUBLE.S */ 5180/* File: vfp/fbinopWide.S */ 5181 /* 5182 * Generic 64-bit double-precision floating point binary operation. 5183 * Provide an "instr" line that specifies an instruction that performs 5184 * "d2 = d0 op d1". 5185 * 5186 * for: add-double, sub-double, mul-double, div-double 5187 */ 5188 /* doubleop vAA, vBB, vCC */ 5189 FETCH(r0, 1) @ r0<- CCBB 5190 mov r9, rINST, lsr #8 @ r9<- AA 5191 mov r3, r0, lsr #8 @ r3<- CC 5192 and r2, r0, #255 @ r2<- BB 5193 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5194 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5195 fldd d1, [r3] @ d1<- vCC 5196 fldd d0, [r2] @ d0<- vBB 5197 5198 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5199 fsubd d2, d0, d1 @ s2<- op 5200 GET_INST_OPCODE(ip) @ extract opcode from rINST 5201 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5202 fstd d2, [r9] @ vAA<- d2 5203 GOTO_OPCODE(ip) @ jump to next instruction 5204 5205 5206/* ------------------------------ */ 5207 .balign 64 5208.L_OP_MUL_DOUBLE: /* 0xad */ 5209/* File: vfp/OP_MUL_DOUBLE.S */ 5210/* File: vfp/fbinopWide.S */ 5211 /* 5212 * Generic 64-bit double-precision floating point binary operation. 
5213 * Provide an "instr" line that specifies an instruction that performs 5214 * "d2 = d0 op d1". 5215 * 5216 * for: add-double, sub-double, mul-double, div-double 5217 */ 5218 /* doubleop vAA, vBB, vCC */ 5219 FETCH(r0, 1) @ r0<- CCBB 5220 mov r9, rINST, lsr #8 @ r9<- AA 5221 mov r3, r0, lsr #8 @ r3<- CC 5222 and r2, r0, #255 @ r2<- BB 5223 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5224 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5225 fldd d1, [r3] @ d1<- vCC 5226 fldd d0, [r2] @ d0<- vBB 5227 5228 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5229 fmuld d2, d0, d1 @ s2<- op 5230 GET_INST_OPCODE(ip) @ extract opcode from rINST 5231 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5232 fstd d2, [r9] @ vAA<- d2 5233 GOTO_OPCODE(ip) @ jump to next instruction 5234 5235 5236/* ------------------------------ */ 5237 .balign 64 5238.L_OP_DIV_DOUBLE: /* 0xae */ 5239/* File: vfp/OP_DIV_DOUBLE.S */ 5240/* File: vfp/fbinopWide.S */ 5241 /* 5242 * Generic 64-bit double-precision floating point binary operation. 5243 * Provide an "instr" line that specifies an instruction that performs 5244 * "d2 = d0 op d1". 
5245 * 5246 * for: add-double, sub-double, mul-double, div-double 5247 */ 5248 /* doubleop vAA, vBB, vCC */ 5249 FETCH(r0, 1) @ r0<- CCBB 5250 mov r9, rINST, lsr #8 @ r9<- AA 5251 mov r3, r0, lsr #8 @ r3<- CC 5252 and r2, r0, #255 @ r2<- BB 5253 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC 5254 VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB 5255 fldd d1, [r3] @ d1<- vCC 5256 fldd d0, [r2] @ d0<- vBB 5257 5258 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5259 fdivd d2, d0, d1 @ s2<- op 5260 GET_INST_OPCODE(ip) @ extract opcode from rINST 5261 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA 5262 fstd d2, [r9] @ vAA<- d2 5263 GOTO_OPCODE(ip) @ jump to next instruction 5264 5265 5266/* ------------------------------ */ 5267 .balign 64 5268.L_OP_REM_DOUBLE: /* 0xaf */ 5269/* File: armv5te/OP_REM_DOUBLE.S */ 5270/* EABI doesn't define a double remainder function, but libm does */ 5271/* File: armv5te/binopWide.S */ 5272 /* 5273 * Generic 64-bit binary operation. Provide an "instr" line that 5274 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5275 * This could be an ARM instruction or a function call. (If the result 5276 * comes back in a register other than r0, you can override "result".) 5277 * 5278 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5279 * vCC (r1). Useful for integer division and modulus. 5280 * 5281 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5282 * xor-long, add-double, sub-double, mul-double, div-double, 5283 * rem-double 5284 * 5285 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
5286 */ 5287 /* binop vAA, vBB, vCC */ 5288 FETCH(r0, 1) @ r0<- CCBB 5289 mov r9, rINST, lsr #8 @ r9<- AA 5290 and r2, r0, #255 @ r2<- BB 5291 mov r3, r0, lsr #8 @ r3<- CC 5292 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 5293 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 5294 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 5295 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 5296 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 5297 .if 0 5298 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5299 beq common_errDivideByZero 5300 .endif 5301 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5302 5303 @ optional op; may set condition codes 5304 bl fmod @ result<- op, r0-r3 changed 5305 GET_INST_OPCODE(ip) @ extract opcode from rINST 5306 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5307 GOTO_OPCODE(ip) @ jump to next instruction 5308 /* 14-17 instructions */ 5309 5310 5311 5312/* ------------------------------ */ 5313 .balign 64 5314.L_OP_ADD_INT_2ADDR: /* 0xb0 */ 5315/* File: armv5te/OP_ADD_INT_2ADDR.S */ 5316/* File: armv5te/binop2addr.S */ 5317 /* 5318 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5319 * that specifies an instruction that performs "result = r0 op r1". 5320 * This could be an ARM instruction or a function call. (If the result 5321 * comes back in a register other than r0, you can override "result".) 5322 * 5323 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5324 * vCC (r1). Useful for integer division and modulus. 5325 * 5326 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5327 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5328 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5329 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5330 */ 5331 /* binop/2addr vA, vB */ 5332 mov r9, rINST, lsr #8 @ r9<- A+ 5333 mov r3, rINST, lsr #12 @ r3<- B 5334 and r9, r9, #15 5335 GET_VREG(r0, r9) @ r0<- vA 5336 GET_VREG(r1, r3) @ r1<- vB 5337 .if 0 5338 cmp r1, #0 @ is second operand zero? 
5339 beq common_errDivideByZero 5340 .endif 5341 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5342 5343 @ optional op; may set condition codes 5344 add r0, r0, r1 @ r0<- op, r0-r3 changed 5345 GET_INST_OPCODE(ip) @ extract opcode from rINST 5346 SET_VREG(r0, r9) @ vAA<- r0 5347 GOTO_OPCODE(ip) @ jump to next instruction 5348 /* 10-13 instructions */ 5349 5350 5351 5352/* ------------------------------ */ 5353 .balign 64 5354.L_OP_SUB_INT_2ADDR: /* 0xb1 */ 5355/* File: armv5te/OP_SUB_INT_2ADDR.S */ 5356/* File: armv5te/binop2addr.S */ 5357 /* 5358 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5359 * that specifies an instruction that performs "result = r0 op r1". 5360 * This could be an ARM instruction or a function call. (If the result 5361 * comes back in a register other than r0, you can override "result".) 5362 * 5363 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5364 * vCC (r1). Useful for integer division and modulus. 5365 * 5366 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5367 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5368 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5369 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5370 */ 5371 /* binop/2addr vA, vB */ 5372 mov r9, rINST, lsr #8 @ r9<- A+ 5373 mov r3, rINST, lsr #12 @ r3<- B 5374 and r9, r9, #15 5375 GET_VREG(r0, r9) @ r0<- vA 5376 GET_VREG(r1, r3) @ r1<- vB 5377 .if 0 5378 cmp r1, #0 @ is second operand zero? 
5379 beq common_errDivideByZero 5380 .endif 5381 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5382 5383 @ optional op; may set condition codes 5384 sub r0, r0, r1 @ r0<- op, r0-r3 changed 5385 GET_INST_OPCODE(ip) @ extract opcode from rINST 5386 SET_VREG(r0, r9) @ vAA<- r0 5387 GOTO_OPCODE(ip) @ jump to next instruction 5388 /* 10-13 instructions */ 5389 5390 5391 5392/* ------------------------------ */ 5393 .balign 64 5394.L_OP_MUL_INT_2ADDR: /* 0xb2 */ 5395/* File: armv5te/OP_MUL_INT_2ADDR.S */ 5396/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 5397/* File: armv5te/binop2addr.S */ 5398 /* 5399 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5400 * that specifies an instruction that performs "result = r0 op r1". 5401 * This could be an ARM instruction or a function call. (If the result 5402 * comes back in a register other than r0, you can override "result".) 5403 * 5404 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5405 * vCC (r1). Useful for integer division and modulus. 5406 * 5407 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5408 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5409 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5410 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5411 */ 5412 /* binop/2addr vA, vB */ 5413 mov r9, rINST, lsr #8 @ r9<- A+ 5414 mov r3, rINST, lsr #12 @ r3<- B 5415 and r9, r9, #15 5416 GET_VREG(r0, r9) @ r0<- vA 5417 GET_VREG(r1, r3) @ r1<- vB 5418 .if 0 5419 cmp r1, #0 @ is second operand zero? 
5420 beq common_errDivideByZero 5421 .endif 5422 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5423 5424 @ optional op; may set condition codes 5425 mul r0, r1, r0 @ r0<- op, r0-r3 changed 5426 GET_INST_OPCODE(ip) @ extract opcode from rINST 5427 SET_VREG(r0, r9) @ vAA<- r0 5428 GOTO_OPCODE(ip) @ jump to next instruction 5429 /* 10-13 instructions */ 5430 5431 5432 5433/* ------------------------------ */ 5434 .balign 64 5435.L_OP_DIV_INT_2ADDR: /* 0xb3 */ 5436/* File: armv5te/OP_DIV_INT_2ADDR.S */ 5437/* File: armv5te/binop2addr.S */ 5438 /* 5439 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5440 * that specifies an instruction that performs "result = r0 op r1". 5441 * This could be an ARM instruction or a function call. (If the result 5442 * comes back in a register other than r0, you can override "result".) 5443 * 5444 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5445 * vCC (r1). Useful for integer division and modulus. 5446 * 5447 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5448 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5449 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5450 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5451 */ 5452 /* binop/2addr vA, vB */ 5453 mov r9, rINST, lsr #8 @ r9<- A+ 5454 mov r3, rINST, lsr #12 @ r3<- B 5455 and r9, r9, #15 5456 GET_VREG(r0, r9) @ r0<- vA 5457 GET_VREG(r1, r3) @ r1<- vB 5458 .if 1 5459 cmp r1, #0 @ is second operand zero? 
5460 beq common_errDivideByZero 5461 .endif 5462 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5463 5464 @ optional op; may set condition codes 5465 bl __aeabi_idiv @ r0<- op, r0-r3 changed 5466 GET_INST_OPCODE(ip) @ extract opcode from rINST 5467 SET_VREG(r0, r9) @ vAA<- r0 5468 GOTO_OPCODE(ip) @ jump to next instruction 5469 /* 10-13 instructions */ 5470 5471 5472 5473/* ------------------------------ */ 5474 .balign 64 5475.L_OP_REM_INT_2ADDR: /* 0xb4 */ 5476/* File: armv5te/OP_REM_INT_2ADDR.S */ 5477/* idivmod returns quotient in r0 and remainder in r1 */ 5478/* File: armv5te/binop2addr.S */ 5479 /* 5480 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5481 * that specifies an instruction that performs "result = r0 op r1". 5482 * This could be an ARM instruction or a function call. (If the result 5483 * comes back in a register other than r0, you can override "result".) 5484 * 5485 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5486 * vCC (r1). Useful for integer division and modulus. 5487 * 5488 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5489 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5490 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5491 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5492 */ 5493 /* binop/2addr vA, vB */ 5494 mov r9, rINST, lsr #8 @ r9<- A+ 5495 mov r3, rINST, lsr #12 @ r3<- B 5496 and r9, r9, #15 5497 GET_VREG(r0, r9) @ r0<- vA 5498 GET_VREG(r1, r3) @ r1<- vB 5499 .if 1 5500 cmp r1, #0 @ is second operand zero? 
5501 beq common_errDivideByZero 5502 .endif 5503 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5504 5505 @ optional op; may set condition codes 5506 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 5507 GET_INST_OPCODE(ip) @ extract opcode from rINST 5508 SET_VREG(r1, r9) @ vAA<- r1 5509 GOTO_OPCODE(ip) @ jump to next instruction 5510 /* 10-13 instructions */ 5511 5512 5513 5514/* ------------------------------ */ 5515 .balign 64 5516.L_OP_AND_INT_2ADDR: /* 0xb5 */ 5517/* File: armv5te/OP_AND_INT_2ADDR.S */ 5518/* File: armv5te/binop2addr.S */ 5519 /* 5520 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5521 * that specifies an instruction that performs "result = r0 op r1". 5522 * This could be an ARM instruction or a function call. (If the result 5523 * comes back in a register other than r0, you can override "result".) 5524 * 5525 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5526 * vCC (r1). Useful for integer division and modulus. 5527 * 5528 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5529 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5530 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5531 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5532 */ 5533 /* binop/2addr vA, vB */ 5534 mov r9, rINST, lsr #8 @ r9<- A+ 5535 mov r3, rINST, lsr #12 @ r3<- B 5536 and r9, r9, #15 5537 GET_VREG(r0, r9) @ r0<- vA 5538 GET_VREG(r1, r3) @ r1<- vB 5539 .if 0 5540 cmp r1, #0 @ is second operand zero? 
5541 beq common_errDivideByZero 5542 .endif 5543 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5544 5545 @ optional op; may set condition codes 5546 and r0, r0, r1 @ r0<- op, r0-r3 changed 5547 GET_INST_OPCODE(ip) @ extract opcode from rINST 5548 SET_VREG(r0, r9) @ vAA<- r0 5549 GOTO_OPCODE(ip) @ jump to next instruction 5550 /* 10-13 instructions */ 5551 5552 5553 5554/* ------------------------------ */ 5555 .balign 64 5556.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5557/* File: armv5te/OP_OR_INT_2ADDR.S */ 5558/* File: armv5te/binop2addr.S */ 5559 /* 5560 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5561 * that specifies an instruction that performs "result = r0 op r1". 5562 * This could be an ARM instruction or a function call. (If the result 5563 * comes back in a register other than r0, you can override "result".) 5564 * 5565 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5566 * vCC (r1). Useful for integer division and modulus. 5567 * 5568 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5569 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5570 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5571 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5572 */ 5573 /* binop/2addr vA, vB */ 5574 mov r9, rINST, lsr #8 @ r9<- A+ 5575 mov r3, rINST, lsr #12 @ r3<- B 5576 and r9, r9, #15 5577 GET_VREG(r0, r9) @ r0<- vA 5578 GET_VREG(r1, r3) @ r1<- vB 5579 .if 0 5580 cmp r1, #0 @ is second operand zero? 
5581 beq common_errDivideByZero 5582 .endif 5583 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5584 5585 @ optional op; may set condition codes 5586 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5587 GET_INST_OPCODE(ip) @ extract opcode from rINST 5588 SET_VREG(r0, r9) @ vAA<- r0 5589 GOTO_OPCODE(ip) @ jump to next instruction 5590 /* 10-13 instructions */ 5591 5592 5593 5594/* ------------------------------ */ 5595 .balign 64 5596.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5597/* File: armv5te/OP_XOR_INT_2ADDR.S */ 5598/* File: armv5te/binop2addr.S */ 5599 /* 5600 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5601 * that specifies an instruction that performs "result = r0 op r1". 5602 * This could be an ARM instruction or a function call. (If the result 5603 * comes back in a register other than r0, you can override "result".) 5604 * 5605 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5606 * vCC (r1). Useful for integer division and modulus. 5607 * 5608 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5609 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5610 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5611 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5612 */ 5613 /* binop/2addr vA, vB */ 5614 mov r9, rINST, lsr #8 @ r9<- A+ 5615 mov r3, rINST, lsr #12 @ r3<- B 5616 and r9, r9, #15 5617 GET_VREG(r0, r9) @ r0<- vA 5618 GET_VREG(r1, r3) @ r1<- vB 5619 .if 0 5620 cmp r1, #0 @ is second operand zero? 
5621 beq common_errDivideByZero 5622 .endif 5623 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5624 5625 @ optional op; may set condition codes 5626 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5627 GET_INST_OPCODE(ip) @ extract opcode from rINST 5628 SET_VREG(r0, r9) @ vAA<- r0 5629 GOTO_OPCODE(ip) @ jump to next instruction 5630 /* 10-13 instructions */ 5631 5632 5633 5634/* ------------------------------ */ 5635 .balign 64 5636.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5637/* File: armv5te/OP_SHL_INT_2ADDR.S */ 5638/* File: armv5te/binop2addr.S */ 5639 /* 5640 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5641 * that specifies an instruction that performs "result = r0 op r1". 5642 * This could be an ARM instruction or a function call. (If the result 5643 * comes back in a register other than r0, you can override "result".) 5644 * 5645 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5646 * vCC (r1). Useful for integer division and modulus. 5647 * 5648 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5649 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5650 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5651 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5652 */ 5653 /* binop/2addr vA, vB */ 5654 mov r9, rINST, lsr #8 @ r9<- A+ 5655 mov r3, rINST, lsr #12 @ r3<- B 5656 and r9, r9, #15 5657 GET_VREG(r0, r9) @ r0<- vA 5658 GET_VREG(r1, r3) @ r1<- vB 5659 .if 0 5660 cmp r1, #0 @ is second operand zero? 
5661 beq common_errDivideByZero 5662 .endif 5663 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5664 5665 and r1, r1, #31 @ optional op; may set condition codes 5666 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5667 GET_INST_OPCODE(ip) @ extract opcode from rINST 5668 SET_VREG(r0, r9) @ vAA<- r0 5669 GOTO_OPCODE(ip) @ jump to next instruction 5670 /* 10-13 instructions */ 5671 5672 5673 5674/* ------------------------------ */ 5675 .balign 64 5676.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5677/* File: armv5te/OP_SHR_INT_2ADDR.S */ 5678/* File: armv5te/binop2addr.S */ 5679 /* 5680 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5681 * that specifies an instruction that performs "result = r0 op r1". 5682 * This could be an ARM instruction or a function call. (If the result 5683 * comes back in a register other than r0, you can override "result".) 5684 * 5685 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5686 * vCC (r1). Useful for integer division and modulus. 5687 * 5688 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5689 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5690 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5691 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5692 */ 5693 /* binop/2addr vA, vB */ 5694 mov r9, rINST, lsr #8 @ r9<- A+ 5695 mov r3, rINST, lsr #12 @ r3<- B 5696 and r9, r9, #15 5697 GET_VREG(r0, r9) @ r0<- vA 5698 GET_VREG(r1, r3) @ r1<- vB 5699 .if 0 5700 cmp r1, #0 @ is second operand zero? 
5701 beq common_errDivideByZero 5702 .endif 5703 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5704 5705 and r1, r1, #31 @ optional op; may set condition codes 5706 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5707 GET_INST_OPCODE(ip) @ extract opcode from rINST 5708 SET_VREG(r0, r9) @ vAA<- r0 5709 GOTO_OPCODE(ip) @ jump to next instruction 5710 /* 10-13 instructions */ 5711 5712 5713 5714/* ------------------------------ */ 5715 .balign 64 5716.L_OP_USHR_INT_2ADDR: /* 0xba */ 5717/* File: armv5te/OP_USHR_INT_2ADDR.S */ 5718/* File: armv5te/binop2addr.S */ 5719 /* 5720 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5721 * that specifies an instruction that performs "result = r0 op r1". 5722 * This could be an ARM instruction or a function call. (If the result 5723 * comes back in a register other than r0, you can override "result".) 5724 * 5725 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5726 * vCC (r1). Useful for integer division and modulus. 5727 * 5728 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5729 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5730 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5731 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5732 */ 5733 /* binop/2addr vA, vB */ 5734 mov r9, rINST, lsr #8 @ r9<- A+ 5735 mov r3, rINST, lsr #12 @ r3<- B 5736 and r9, r9, #15 5737 GET_VREG(r0, r9) @ r0<- vA 5738 GET_VREG(r1, r3) @ r1<- vB 5739 .if 0 5740 cmp r1, #0 @ is second operand zero? 
5741 beq common_errDivideByZero 5742 .endif 5743 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5744 5745 and r1, r1, #31 @ optional op; may set condition codes 5746 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5747 GET_INST_OPCODE(ip) @ extract opcode from rINST 5748 SET_VREG(r0, r9) @ vAA<- r0 5749 GOTO_OPCODE(ip) @ jump to next instruction 5750 /* 10-13 instructions */ 5751 5752 5753 5754/* ------------------------------ */ 5755 .balign 64 5756.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 5757/* File: armv5te/OP_ADD_LONG_2ADDR.S */ 5758/* File: armv5te/binopWide2addr.S */ 5759 /* 5760 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5761 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5762 * This could be an ARM instruction or a function call. (If the result 5763 * comes back in a register other than r0, you can override "result".) 5764 * 5765 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5766 * vCC (r1). Useful for integer division and modulus. 5767 * 5768 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5769 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5770 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5771 * rem-double/2addr 5772 */ 5773 /* binop/2addr vA, vB */ 5774 mov r9, rINST, lsr #8 @ r9<- A+ 5775 mov r1, rINST, lsr #12 @ r1<- B 5776 and r9, r9, #15 5777 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5778 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5779 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5780 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5781 .if 0 5782 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5783 beq common_errDivideByZero 5784 .endif 5785 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5786 5787 adds r0, r0, r2 @ optional op; may set condition codes 5788 adc r1, r1, r3 @ result<- op, r0-r3 changed 5789 GET_INST_OPCODE(ip) @ extract opcode from rINST 5790 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5791 GOTO_OPCODE(ip) @ jump to next instruction 5792 /* 12-15 instructions */ 5793 5794 5795 5796/* ------------------------------ */ 5797 .balign 64 5798.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 5799/* File: armv5te/OP_SUB_LONG_2ADDR.S */ 5800/* File: armv5te/binopWide2addr.S */ 5801 /* 5802 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5803 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5804 * This could be an ARM instruction or a function call. (If the result 5805 * comes back in a register other than r0, you can override "result".) 5806 * 5807 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5808 * vCC (r1). Useful for integer division and modulus. 5809 * 5810 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5811 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5812 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5813 * rem-double/2addr 5814 */ 5815 /* binop/2addr vA, vB */ 5816 mov r9, rINST, lsr #8 @ r9<- A+ 5817 mov r1, rINST, lsr #12 @ r1<- B 5818 and r9, r9, #15 5819 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5820 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5821 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5822 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5823 .if 0 5824 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5825 beq common_errDivideByZero 5826 .endif 5827 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5828 5829 subs r0, r0, r2 @ optional op; may set condition codes 5830 sbc r1, r1, r3 @ result<- op, r0-r3 changed 5831 GET_INST_OPCODE(ip) @ extract opcode from rINST 5832 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5833 GOTO_OPCODE(ip) @ jump to next instruction 5834 /* 12-15 instructions */ 5835 5836 5837 5838/* ------------------------------ */ 5839 .balign 64 5840.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 5841/* File: armv5te/OP_MUL_LONG_2ADDR.S */ 5842 /* 5843 * Signed 64-bit integer multiply, "/2addr" version. 5844 * 5845 * See OP_MUL_LONG for an explanation. 5846 * 5847 * We get a little tight on registers, so to avoid looking up &fp[A] 5848 * again we stuff it into rINST. 5849 */ 5850 /* mul-long/2addr vA, vB */ 5851 mov r9, rINST, lsr #8 @ r9<- A+ 5852 mov r1, rINST, lsr #12 @ r1<- B 5853 and r9, r9, #15 5854 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5855 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 5856 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5857 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 5858 mul ip, r2, r1 @ ip<- ZxW 5859 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 5860 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 5861 mov r0, rINST @ r0<- &fp[A] (free up rINST) 5862 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5863 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 5864 GET_INST_OPCODE(ip) @ extract opcode from rINST 5865 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 5866 GOTO_OPCODE(ip) @ jump to next instruction 5867 5868 5869/* ------------------------------ */ 5870 .balign 64 5871.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 5872/* File: armv5te/OP_DIV_LONG_2ADDR.S */ 5873/* File: armv5te/binopWide2addr.S */ 5874 /* 5875 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5876 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5877 * This could be an ARM instruction or a function call. 
(If the result 5878 * comes back in a register other than r0, you can override "result".) 5879 * 5880 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5881 * vCC (r1). Useful for integer division and modulus. 5882 * 5883 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5884 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5885 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5886 * rem-double/2addr 5887 */ 5888 /* binop/2addr vA, vB */ 5889 mov r9, rINST, lsr #8 @ r9<- A+ 5890 mov r1, rINST, lsr #12 @ r1<- B 5891 and r9, r9, #15 5892 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5893 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5894 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5895 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5896 .if 1 5897 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5898 beq common_errDivideByZero 5899 .endif 5900 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5901 5902 @ optional op; may set condition codes 5903 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5904 GET_INST_OPCODE(ip) @ extract opcode from rINST 5905 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5906 GOTO_OPCODE(ip) @ jump to next instruction 5907 /* 12-15 instructions */ 5908 5909 5910 5911/* ------------------------------ */ 5912 .balign 64 5913.L_OP_REM_LONG_2ADDR: /* 0xbf */ 5914/* File: armv5te/OP_REM_LONG_2ADDR.S */ 5915/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 5916/* File: armv5te/binopWide2addr.S */ 5917 /* 5918 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5919 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5920 * This could be an ARM instruction or a function call. (If the result 5921 * comes back in a register other than r0, you can override "result".) 5922 * 5923 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5924 * vCC (r1). Useful for integer division and modulus. 
5925 * 5926 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5927 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5928 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5929 * rem-double/2addr 5930 */ 5931 /* binop/2addr vA, vB */ 5932 mov r9, rINST, lsr #8 @ r9<- A+ 5933 mov r1, rINST, lsr #12 @ r1<- B 5934 and r9, r9, #15 5935 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5936 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5937 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5938 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5939 .if 1 5940 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5941 beq common_errDivideByZero 5942 .endif 5943 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5944 5945 @ optional op; may set condition codes 5946 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5947 GET_INST_OPCODE(ip) @ extract opcode from rINST 5948 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 5949 GOTO_OPCODE(ip) @ jump to next instruction 5950 /* 12-15 instructions */ 5951 5952 5953 5954/* ------------------------------ */ 5955 .balign 64 5956.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 5957/* File: armv5te/OP_AND_LONG_2ADDR.S */ 5958/* File: armv5te/binopWide2addr.S */ 5959 /* 5960 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5961 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5962 * This could be an ARM instruction or a function call. (If the result 5963 * comes back in a register other than r0, you can override "result".) 5964 * 5965 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5966 * vCC (r1). Useful for integer division and modulus. 
5967 * 5968 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5969 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5970 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5971 * rem-double/2addr 5972 */ 5973 /* binop/2addr vA, vB */ 5974 mov r9, rINST, lsr #8 @ r9<- A+ 5975 mov r1, rINST, lsr #12 @ r1<- B 5976 and r9, r9, #15 5977 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5978 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5979 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5980 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5981 .if 0 5982 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5983 beq common_errDivideByZero 5984 .endif 5985 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5986 5987 and r0, r0, r2 @ optional op; may set condition codes 5988 and r1, r1, r3 @ result<- op, r0-r3 changed 5989 GET_INST_OPCODE(ip) @ extract opcode from rINST 5990 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5991 GOTO_OPCODE(ip) @ jump to next instruction 5992 /* 12-15 instructions */ 5993 5994 5995 5996/* ------------------------------ */ 5997 .balign 64 5998.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 5999/* File: armv5te/OP_OR_LONG_2ADDR.S */ 6000/* File: armv5te/binopWide2addr.S */ 6001 /* 6002 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6003 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6004 * This could be an ARM instruction or a function call. (If the result 6005 * comes back in a register other than r0, you can override "result".) 6006 * 6007 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6008 * vCC (r1). Useful for integer division and modulus. 
6009 * 6010 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6011 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6012 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6013 * rem-double/2addr 6014 */ 6015 /* binop/2addr vA, vB */ 6016 mov r9, rINST, lsr #8 @ r9<- A+ 6017 mov r1, rINST, lsr #12 @ r1<- B 6018 and r9, r9, #15 6019 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6020 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6021 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6022 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6023 .if 0 6024 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6025 beq common_errDivideByZero 6026 .endif 6027 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6028 6029 orr r0, r0, r2 @ optional op; may set condition codes 6030 orr r1, r1, r3 @ result<- op, r0-r3 changed 6031 GET_INST_OPCODE(ip) @ extract opcode from rINST 6032 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6033 GOTO_OPCODE(ip) @ jump to next instruction 6034 /* 12-15 instructions */ 6035 6036 6037 6038/* ------------------------------ */ 6039 .balign 64 6040.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 6041/* File: armv5te/OP_XOR_LONG_2ADDR.S */ 6042/* File: armv5te/binopWide2addr.S */ 6043 /* 6044 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6045 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6046 * This could be an ARM instruction or a function call. (If the result 6047 * comes back in a register other than r0, you can override "result".) 6048 * 6049 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6050 * vCC (r1). Useful for integer division and modulus. 
6051 * 6052 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6053 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6054 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6055 * rem-double/2addr 6056 */ 6057 /* binop/2addr vA, vB */ 6058 mov r9, rINST, lsr #8 @ r9<- A+ 6059 mov r1, rINST, lsr #12 @ r1<- B 6060 and r9, r9, #15 6061 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6062 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6063 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6064 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6065 .if 0 6066 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6067 beq common_errDivideByZero 6068 .endif 6069 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6070 6071 eor r0, r0, r2 @ optional op; may set condition codes 6072 eor r1, r1, r3 @ result<- op, r0-r3 changed 6073 GET_INST_OPCODE(ip) @ extract opcode from rINST 6074 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6075 GOTO_OPCODE(ip) @ jump to next instruction 6076 /* 12-15 instructions */ 6077 6078 6079 6080/* ------------------------------ */ 6081 .balign 64 6082.L_OP_SHL_LONG_2ADDR: /* 0xc3 */ 6083/* File: armv5te/OP_SHL_LONG_2ADDR.S */ 6084 /* 6085 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6086 * 32-bit shift distance. 
6087 */ 6088 /* shl-long/2addr vA, vB */ 6089 mov r9, rINST, lsr #8 @ r9<- A+ 6090 mov r3, rINST, lsr #12 @ r3<- B 6091 and r9, r9, #15 6092 GET_VREG(r2, r3) @ r2<- vB 6093 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6094 and r2, r2, #63 @ r2<- r2 & 0x3f 6095 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6096 6097 mov r1, r1, asl r2 @ r1<- r1 << r2 6098 rsb r3, r2, #32 @ r3<- 32 - r2 6099 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 6100 subs ip, r2, #32 @ ip<- r2 - 32 6101 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6102 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 6103 mov r0, r0, asl r2 @ r0<- r0 << r2 6104 b .LOP_SHL_LONG_2ADDR_finish 6105 6106/* ------------------------------ */ 6107 .balign 64 6108.L_OP_SHR_LONG_2ADDR: /* 0xc4 */ 6109/* File: armv5te/OP_SHR_LONG_2ADDR.S */ 6110 /* 6111 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6112 * 32-bit shift distance. 6113 */ 6114 /* shr-long/2addr vA, vB */ 6115 mov r9, rINST, lsr #8 @ r9<- A+ 6116 mov r3, rINST, lsr #12 @ r3<- B 6117 and r9, r9, #15 6118 GET_VREG(r2, r3) @ r2<- vB 6119 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6120 and r2, r2, #63 @ r2<- r2 & 0x3f 6121 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6122 6123 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6124 rsb r3, r2, #32 @ r3<- 32 - r2 6125 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6126 subs ip, r2, #32 @ ip<- r2 - 32 6127 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6128 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 6129 mov r1, r1, asr r2 @ r1<- r1 >> r2 6130 b .LOP_SHR_LONG_2ADDR_finish 6131 6132/* ------------------------------ */ 6133 .balign 64 6134.L_OP_USHR_LONG_2ADDR: /* 0xc5 */ 6135/* File: armv5te/OP_USHR_LONG_2ADDR.S */ 6136 /* 6137 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6138 * 32-bit shift distance. 
6139 */ 6140 /* ushr-long/2addr vA, vB */ 6141 mov r9, rINST, lsr #8 @ r9<- A+ 6142 mov r3, rINST, lsr #12 @ r3<- B 6143 and r9, r9, #15 6144 GET_VREG(r2, r3) @ r2<- vB 6145 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6146 and r2, r2, #63 @ r2<- r2 & 0x3f 6147 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6148 6149 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6150 rsb r3, r2, #32 @ r3<- 32 - r2 6151 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6152 subs ip, r2, #32 @ ip<- r2 - 32 6153 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6154 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 6155 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 6156 b .LOP_USHR_LONG_2ADDR_finish 6157 6158/* ------------------------------ */ 6159 .balign 64 6160.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */ 6161/* File: vfp/OP_ADD_FLOAT_2ADDR.S */ 6162/* File: vfp/fbinop2addr.S */ 6163 /* 6164 * Generic 32-bit floating point "/2addr" binary operation. Provide 6165 * an "instr" line that specifies an instruction that performs 6166 * "s2 = s0 op s1". 6167 * 6168 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6169 */ 6170 /* binop/2addr vA, vB */ 6171 mov r3, rINST, lsr #12 @ r3<- B 6172 mov r9, rINST, lsr #8 @ r9<- A+ 6173 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6174 and r9, r9, #15 @ r9<- A 6175 flds s1, [r3] @ s1<- vB 6176 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6177 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6178 flds s0, [r9] @ s0<- vA 6179 6180 fadds s2, s0, s1 @ s2<- op 6181 GET_INST_OPCODE(ip) @ extract opcode from rINST 6182 fsts s2, [r9] @ vAA<- s2 6183 GOTO_OPCODE(ip) @ jump to next instruction 6184 6185 6186/* ------------------------------ */ 6187 .balign 64 6188.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */ 6189/* File: vfp/OP_SUB_FLOAT_2ADDR.S */ 6190/* File: vfp/fbinop2addr.S */ 6191 /* 6192 * Generic 32-bit floating point "/2addr" binary operation. Provide 6193 * an "instr" line that specifies an instruction that performs 6194 * "s2 = s0 op s1". 
6195 * 6196 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6197 */ 6198 /* binop/2addr vA, vB */ 6199 mov r3, rINST, lsr #12 @ r3<- B 6200 mov r9, rINST, lsr #8 @ r9<- A+ 6201 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6202 and r9, r9, #15 @ r9<- A 6203 flds s1, [r3] @ s1<- vB 6204 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6205 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6206 flds s0, [r9] @ s0<- vA 6207 6208 fsubs s2, s0, s1 @ s2<- op 6209 GET_INST_OPCODE(ip) @ extract opcode from rINST 6210 fsts s2, [r9] @ vAA<- s2 6211 GOTO_OPCODE(ip) @ jump to next instruction 6212 6213 6214/* ------------------------------ */ 6215 .balign 64 6216.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */ 6217/* File: vfp/OP_MUL_FLOAT_2ADDR.S */ 6218/* File: vfp/fbinop2addr.S */ 6219 /* 6220 * Generic 32-bit floating point "/2addr" binary operation. Provide 6221 * an "instr" line that specifies an instruction that performs 6222 * "s2 = s0 op s1". 6223 * 6224 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6225 */ 6226 /* binop/2addr vA, vB */ 6227 mov r3, rINST, lsr #12 @ r3<- B 6228 mov r9, rINST, lsr #8 @ r9<- A+ 6229 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6230 and r9, r9, #15 @ r9<- A 6231 flds s1, [r3] @ s1<- vB 6232 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6233 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6234 flds s0, [r9] @ s0<- vA 6235 6236 fmuls s2, s0, s1 @ s2<- op 6237 GET_INST_OPCODE(ip) @ extract opcode from rINST 6238 fsts s2, [r9] @ vAA<- s2 6239 GOTO_OPCODE(ip) @ jump to next instruction 6240 6241 6242/* ------------------------------ */ 6243 .balign 64 6244.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */ 6245/* File: vfp/OP_DIV_FLOAT_2ADDR.S */ 6246/* File: vfp/fbinop2addr.S */ 6247 /* 6248 * Generic 32-bit floating point "/2addr" binary operation. Provide 6249 * an "instr" line that specifies an instruction that performs 6250 * "s2 = s0 op s1". 
 *
 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
 */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fdivs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT_2ADDR: /* 0xca */
/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r0, r9)                    @ r0<- vA
    GET_VREG(r1, r3)                    @ r1<- vB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    @ float remainder goes through libm's fmodf (no __aeabi_ helper exists)
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
/* File: vfp/OP_ADD_DOUBLE_2ADDR.S */
/* File: vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    faddd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
/* File: vfp/OP_SUB_DOUBLE_2ADDR.S */
/* File: vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fsubd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
/* File: vfp/OP_MUL_DOUBLE_2ADDR.S */
/* File: vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fmuld   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
/* File: vfp/OP_DIV_DOUBLE_2ADDR.S */
/* File: vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fdivd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    @ double remainder goes through libm's fmod (no __aeabi_ helper exists);
    @ result comes back in r0/r1 per the EABI 64-bit return convention
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT16: /* 0xd0 */
/* File: armv5te/OP_ADD_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT: /* 0xd1 */
/* File: armv5te/OP_RSUB_INT.S */
/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ "reverse subtract": vA = +CCCC - vB (literal minus register)
    rsb     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT16: /* 0xd2 */
/* File: armv5te/OP_MUL_INT_LIT16.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ pre-ARMv6 MUL forbids Rd == Rm, hence the "r0, r1, r0" operand order
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT16: /* 0xd3 */
/* File: armv5te/OP_DIV_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.
     Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ ARMv5TE has no hardware divide; use the EABI helper
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT16: /* 0xd4 */
/* File: armv5te/OP_REM_INT_LIT16.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ remainder is in r1 on return (see note above), so store r1 not r0
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT16: /* 0xd5 */
/* File: armv5te/OP_AND_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT16: /* 0xd6 */
/* File: armv5te/OP_OR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT16: /* 0xd7 */
/* File: armv5te/OP_XOR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT8: /* 0xd8 */
/* File: armv5te/OP_ADD_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT_LIT8: /* 0xd9 */
/* File: armv5te/OP_RSUB_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    @ "reverse subtract": vAA = +CC - vBB (literal minus register)
    rsb     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT8: /* 0xda */
/* File: armv5te/OP_MUL_INT_LIT8.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    @ pre-ARMv6 MUL forbids Rd == Rm, hence the "r0, r1, r0" operand order
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT8: /* 0xdb */
/* File: armv5te/OP_DIV_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 1
    @ NOTE: the "cmp" is commented out in the lit8 template because the
    @ "movs" above already set Z from the CC literal; "beq" uses those flags.
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    @ ARMv5TE has no hardware divide; use the EABI helper
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT8: /* 0xdc */
/* File: armv5te/OP_REM_INT_LIT8.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 1
    @ NOTE: Z was set by the "movs" above; the template's "cmp" stays disabled.
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    @ remainder is in r1 on return (see note above), so store r1 not r0
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT8: /* 0xdd */
/* File: armv5te/OP_AND_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT8: /* 0xde */
/* File: armv5te/OP_OR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT8: /* 0xdf */
/* File: armv5te/OP_XOR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT_LIT8: /* 0xe0 */
/* File: armv5te/OP_SHL_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ Dalvik shifts use only the low 5 bits of the shift count
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_LIT8: /* 0xe1 */
/* File: armv5te/OP_SHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ Dalvik shifts use only the low 5 bits of the shift count
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_LIT8: /* 0xe2 */
/* File: armv5te/OP_USHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ Dalvik shifts use only the low 5 bits of the shift count
    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */



/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E3: /* 0xe3 */
/* File: armv5te/OP_UNUSED_E3.S */
/* File: armv5te/unused.S */
    bl      common_abort



/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E4: /* 0xe4 */
/* File: armv5te/OP_UNUSED_E4.S */
/* File: armv5te/unused.S */
    bl      common_abort



/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E5: /* 0xe5 */
/* File: armv5te/OP_UNUSED_E5.S */
/* File: armv5te/unused.S */
    bl      common_abort



/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E6: /* 0xe6 */
/* File: armv5te/OP_UNUSED_E6.S */
/* File: armv5te/unused.S */
    bl      common_abort



/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E7: /* 0xe7 */
/* File: armv5te/OP_UNUSED_E7.S */
/* File: armv5te/unused.S */
    bl      common_abort



/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E8: /* 0xe8 */
/* File: armv5te/OP_UNUSED_E8.S */
/* File: armv5te/unused.S */
    bl      common_abort



/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E9: /* 0xe9 */
/* File: armv5te/OP_UNUSED_E9.S */
/* File: armv5te/unused.S */
    bl      common_abort



/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_EA: /* 0xea */
/* File: armv5te/OP_UNUSED_EA.S */
/* File: armv5te/unused.S */
    bl      common_abort



/*
------------------------------ */ 7273 .balign 64 7274.L_OP_UNUSED_EB: /* 0xeb */ 7275/* File: armv5te/OP_UNUSED_EB.S */ 7276/* File: armv5te/unused.S */ 7277 bl common_abort 7278 7279 7280 7281/* ------------------------------ */ 7282 .balign 64 7283.L_OP_UNUSED_EC: /* 0xec */ 7284/* File: armv5te/OP_UNUSED_EC.S */ 7285/* File: armv5te/unused.S */ 7286 bl common_abort 7287 7288 7289 7290/* ------------------------------ */ 7291 .balign 64 7292.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */ 7293/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */ 7294 /* 7295 * Handle a throw-verification-error instruction. This throws an 7296 * exception for an error discovered during verification. The 7297 * exception is indicated by AA, with some detail provided by BBBB. 7298 */ 7299 /* op AA, ref@BBBB */ 7300 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7301 FETCH(r2, 1) @ r2<- BBBB 7302 EXPORT_PC() @ export the PC 7303 mov r1, rINST, lsr #8 @ r1<- AA 7304 bl dvmThrowVerificationError @ always throws 7305 b common_exceptionThrown @ handle exception 7306 7307 7308/* ------------------------------ */ 7309 .balign 64 7310.L_OP_EXECUTE_INLINE: /* 0xee */ 7311/* File: armv5te/OP_EXECUTE_INLINE.S */ 7312 /* 7313 * Execute a "native inline" instruction. 7314 * 7315 * We need to call: 7316 * dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref) 7317 * 7318 * The first four args are in r0-r3, but the last two must be pushed 7319 * onto the stack. 
7320 */ 7321 /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */ 7322 FETCH(r10, 1) @ r10<- BBBB 7323 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7324 EXPORT_PC() @ can throw 7325 sub sp, sp, #8 @ make room for arg(s) 7326 mov r0, rINST, lsr #12 @ r0<- B 7327 str r1, [sp] @ push &glue->retval 7328 bl .LOP_EXECUTE_INLINE_continue @ make call; will return after 7329 add sp, sp, #8 @ pop stack 7330 cmp r0, #0 @ test boolean result of inline 7331 beq common_exceptionThrown @ returned false, handle exception 7332 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7333 GET_INST_OPCODE(ip) @ extract opcode from rINST 7334 GOTO_OPCODE(ip) @ jump to next instruction 7335 7336/* ------------------------------ */ 7337 .balign 64 7338.L_OP_UNUSED_EF: /* 0xef */ 7339/* File: armv5te/OP_UNUSED_EF.S */ 7340/* File: armv5te/unused.S */ 7341 bl common_abort 7342 7343 7344 7345/* ------------------------------ */ 7346 .balign 64 7347.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7348/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7349 /* 7350 * invoke-direct-empty is a no-op in a "standard" interpreter. 
     */
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_F1: /* 0xf1 */
/* File: armv5te/OP_UNUSED_F1.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_IGET_QUICK: /* 0xf2 */
/* File: armv5te/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A (mask off high nibble)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
/* File: armv5te/OP_IGET_WIDE_QUICK.S */
    /* iget-wide-quick vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldrd    r0, [r3, r1]                @ r0<- obj.field (64 bits, aligned)
    and     r2, r2, #15                 @ r2<- A
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
/* File: armv5te/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_QUICK: /* 0xf5 */
/* File: armv5te/OP_IPUT_QUICK.S */
    /* For: iput-quick, iput-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    and     r2, r2, #15                 @ r2<- A
    GET_VREG(r0, r2)                    @ r0<- fp[A]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
/* File: armv5te/OP_IPUT_WIDE_QUICK.S */
    /* iput-wide-quick vA, vB, offset@CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A(+)
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15                 @ r0<- A
    GET_VREG(r2, r1)                    @ r2<- fp[B], the object pointer
    add     r3, rFP, r0, lsl #2         @ r3<- &fp[A]
    cmp     r2, #0                      @ check object for null
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH(r3, 1)                        @ r3<- field byte offset
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    strd    r0, [r2, r3]                @ obj.field (64 bits, aligned)<- r0/r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */
/* File: armv5te/OP_IPUT_QUICK.S */
    /* For: iput-quick, iput-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    and     r2, r2, #15                 @ r2<- A
    GET_VREG(r0, r2)                    @ r0<- fp[A]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!0)
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB]
    bl      common_invokeMethodNoRange  @ continue on

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!1)
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB]
    bl      common_invokeMethodRange    @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
     *
     * for: [opt] invoke-super-quick, invoke-super-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    ldr     r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
    GET_VREG(r3, r10)                   @ r3<- "this"
    ldr     r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
    cmp     r3, #0                      @ null "this" ref?
    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
    beq     common_errNullObject        @ "this" is null, throw exception
    bl      common_invokeMethodNoRange  @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
     *
     * for: [opt] invoke-super-quick, invoke-super-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r2, [rGLUE, #offGlue_method] @ r2<- current method
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    ldr     r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
    GET_VREG(r3, r10)                   @ r3<- "this"
    ldr     r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
    cmp     r3, #0                      @ null "this" ref?
    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
    beq     common_errNullObject        @ "this" is null, throw exception
    bl      common_invokeMethodRange    @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FC: /* 0xfc */
/* File: armv5te/OP_UNUSED_FC.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FD: /* 0xfd */
/* File: armv5te/OP_UNUSED_FD.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FE: /* 0xfe */
/* File: armv5te/OP_UNUSED_FE.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FF: /* 0xff */
/* File: armv5te/OP_UNUSED_FF.S */
/* File: armv5te/unused.S */
    bl      common_abort


    .balign 64
    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
    .global dvmAsmInstructionEnd
dvmAsmInstructionEnd:

/*
 * ===========================================================================
 *  Sister implementations
 * ===========================================================================
 */
    .global dvmAsmSisterStart
    .type   dvmAsmSisterStart, %function
    .text
    .balign 4
dvmAsmSisterStart:

/* continuation for OP_CONST_STRING */

    /*
     * Continuation if the String has not yet been resolved.
     *  r1: BBBB (String ref)
     *  r9: target register
     */
.LOP_CONST_STRING_resolve:
    EXPORT_PC()
    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveString            @ r0<- String reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CONST_STRING_JUMBO */

    /*
     * Continuation if the String has not yet been resolved.
     *  r1: BBBBBBBB (String ref)
     *  r9: target register
     */
.LOP_CONST_STRING_JUMBO_resolve:
    EXPORT_PC()
    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveString            @ r0<- String reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CONST_CLASS */

    /*
     * Continuation if the Class has not yet been resolved.
     *  r1: BBBB (Class ref)
     *  r9: target register
     */
.LOP_CONST_CLASS_resolve:
    EXPORT_PC()
    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
    mov     r2, #1                      @ r2<- true
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- Class reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CHECK_CAST */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    cmp     r0, #0                      @ failed?
    bne     .LOP_CHECK_CAST_okay        @ no, success

    @ A cast has failed.  We need to throw a ClassCastException with the
    @ class of the object that failed to be cast.
    EXPORT_PC()                         @ about to throw
    ldr     r3, [r9, #offObject_clazz]  @ r3<- obj->clazz
    ldr     r0, .LstrClassCastExceptionPtr
    ldr     r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor
    bl      dvmThrowExceptionWithClassMessage
    b       common_exceptionThrown

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r2 holds BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r1, r2                      @ r1<- BBBB
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBBB
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_CHECK_CAST_resolved    @ pick up where we left off

.LstrClassCastExceptionPtr:
    .word   .LstrClassCastException


/* continuation for OP_INSTANCE_OF */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    @ fall through to OP_INSTANCE_OF_store

    /*
     * r0 holds boolean result
     * r9 holds A
     */
.LOP_INSTANCE_OF_store:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Trivial test succeeded, save and bail.
     *  r9 holds A
     */
.LOP_INSTANCE_OF_trivial:
    mov     r0, #1                      @ indicate success
    @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r3 holds BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [rGLUE, #offGlue_method] @ r0<- glue->method
    mov     r1, r3                      @ r1<- BBBB
    mov     r2, #1                      @ r2<- true
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBBB
    mov     r3, rINST, lsr #12          @ r3<- B
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_INSTANCE_OF_resolved   @ pick up where we left off


/* continuation for OP_NEW_INSTANCE */

    .balign 32                          @ minimize cache lines
.LOP_NEW_INSTANCE_finish: @ r0=new object
    mov     r3, rINST, lsr #8           @ r3<- AA
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Class initialization required.
     *
     *  r0 holds class object
     */
.LOP_NEW_INSTANCE_needinit:
    mov     r9, r0                      @ save r0
    bl      dvmInitClass                @ initialize class
    cmp     r0, #0                      @ check boolean result
    mov     r0, r9                      @ restore r0
    bne     .LOP_NEW_INSTANCE_initialized @ success, continue
    b       common_exceptionThrown      @ failed, deal with init exception

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r1 holds BBBB
     */
.LOP_NEW_INSTANCE_resolve:
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    bne     .LOP_NEW_INSTANCE_resolved  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

.LstrInstantiationErrorPtr:
    .word   .LstrInstantiationError


/* continuation for OP_NEW_ARRAY */


    /*
     * Resolve class.  (This is an uncommon case.)
     *
     *  r1 holds array length
     *  r2 holds class ref CCCC
     */
.LOP_NEW_ARRAY_resolve:
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r9, r1                      @ r9<- length (save)
    mov     r1, r2                      @ r1<- CCCC
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    mov     r1, r9                      @ r1<- length (restore)
    beq     common_exceptionThrown      @ yes, handle exception
    @ fall through to OP_NEW_ARRAY_finish

    /*
     * Finish allocation.
     *
     *  r0 holds class
     *  r1 holds array length
     */
.LOP_NEW_ARRAY_finish:
    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
    cmp     r0, #0                      @ failed?
    mov     r2, rINST, lsr #8           @ r2<- A+
    beq     common_exceptionThrown      @ yes, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_FILLED_NEW_ARRAY */

    /*
     * On entry:
     *  r0 holds array class
     *  r10 holds AA or BA
     */
.LOP_FILLED_NEW_ARRAY_continue:
    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
    ldrb    r3, [r3, #1]                @ r3<- descriptor[1]
    .if     0
    mov     r1, r10                     @ r1<- AA (length)
    .else
    mov     r1, r10, lsr #4             @ r1<- B (length)
    .endif
    cmp     r3, #'I'                    @ array of ints?
    cmpne   r3, #'L'                    @ array of objects?
    cmpne   r3, #'['                    @ array of arrays?
    mov     r9, r1                      @ save length in r9
    bne     .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet
    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
    cmp     r0, #0                      @ null return?
    beq     common_exceptionThrown      @ alloc failed, handle exception

    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
    str     r0, [rGLUE, #offGlue_retval] @ retval.l <- new array
    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
    subs    r9, r9, #1                  @ length--, check for neg
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    bmi     2f                          @ was zero, bail

    @ copy values from registers into the array
    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
    .if     0
    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
1:  ldr     r3, [r2], #4                @ r3<- *r2++
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .else
    cmp     r9, #4                      @ length was initially 5?
    and     r2, r10, #15                @ r2<- A
    bne     1f                          @ <= 4 args, branch
    GET_VREG(r3, r2)                    @ r3<- vA
    sub     r9, r9, #1                  @ count--
    str     r3, [r0, #16]               @ contents[4] = vA
1:  and     r2, r1, #15                 @ r2<- F/E/D/C
    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .endif

2:
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

    /*
     * Throw an exception indicating that we have not implemented this
     * mode of filled-new-array.
     */
.LOP_FILLED_NEW_ARRAY_notimpl:
    ldr     r0, .L_strInternalError
    ldr     r1, .L_strFilledNewArrayNotImpl
    bl      dvmThrowException
    b       common_exceptionThrown

    .if     (!0)                        @ define in one or the other, not both
.L_strFilledNewArrayNotImpl:
    .word   .LstrFilledNewArrayNotImpl
.L_strInternalError:
    .word   .LstrInternalError
    .endif


/* continuation for OP_FILLED_NEW_ARRAY_RANGE */

    /*
     * On entry:
     *  r0 holds array class
     *  r10 holds AA or BA
     */
.LOP_FILLED_NEW_ARRAY_RANGE_continue:
    ldr     r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
    ldrb    r3, [r3, #1]                @ r3<- descriptor[1]
    .if     1
    mov     r1, r10                     @ r1<- AA (length)
    .else
    mov     r1, r10, lsr #4             @ r1<- B (length)
    .endif
    cmp     r3, #'I'                    @ array of ints?
    cmpne   r3, #'L'                    @ array of objects?
    cmpne   r3, #'['                    @ array of arrays?
    mov     r9, r1                      @ save length in r9
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet
    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
    cmp     r0, #0                      @ null return?
    beq     common_exceptionThrown      @ alloc failed, handle exception

    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
    str     r0, [rGLUE, #offGlue_retval] @ retval.l <- new array
    add     r0, r0, #offArrayObject_contents @ r0<- newArray->contents
    subs    r9, r9, #1                  @ length--, check for neg
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    bmi     2f                          @ was zero, bail

    @ copy values from registers into the array
    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
    .if     1
    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
1:  ldr     r3, [r2], #4                @ r3<- *r2++
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .else
    cmp     r9, #4                      @ length was initially 5?
    and     r2, r10, #15                @ r2<- A
    bne     1f                          @ <= 4 args, branch
    GET_VREG(r3, r2)                    @ r3<- vA
    sub     r9, r9, #1                  @ count--
    str     r3, [r0, #16]               @ contents[4] = vA
1:  and     r2, r1, #15                 @ r2<- F/E/D/C
    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .endif

2:
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

    /*
     * Throw an exception indicating that we have not implemented this
     * mode of filled-new-array.
     */
.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
    ldr     r0, .L_strInternalError
    ldr     r1, .L_strFilledNewArrayNotImpl
    bl      dvmThrowException
    b       common_exceptionThrown

    .if     (!1)                        @ define in one or the other, not both
.L_strFilledNewArrayNotImpl:
    .word   .LstrFilledNewArrayNotImpl
.L_strInternalError:
    .word   .LstrInternalError
    .endif


/* continuation for OP_CMPL_FLOAT */
.LOP_CMPL_FLOAT_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CMPG_FLOAT */
.LOP_CMPG_FLOAT_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CMPL_DOUBLE */
.LOP_CMPL_DOUBLE_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CMPG_DOUBLE */
.LOP_CMPG_DOUBLE_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CMP_LONG */

.LOP_CMP_LONG_less:
    mvn     r1, #0                      @ r1<- -1
    @ Want to cond code the next mov so we can avoid branch, but don't see it;
    @ instead, we just replicate the tail end.
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LOP_CMP_LONG_greater:
    mov     r1, #1                      @ r1<- 1
    @ fall through to _finish

.LOP_CMP_LONG_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_AGET_WIDE */

.LOP_AGET_WIDE_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrd    r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_APUT_WIDE */

.LOP_APUT_WIDE_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strd    r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_APUT_OBJECT */
    /*
     * On entry:
     *  r1 = vBB (arrayObj)
     *  r9 = vAA (obj)
     *  r10 = offset into array (vBB + vCC * width)
     */
.LOP_APUT_OBJECT_finish:
    cmp     r9, #0                      @ storing null reference?
    beq     .LOP_APUT_OBJECT_skip_check @ yes, skip type checks
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    ldr     r1, [r1, #offObject_clazz]  @ r1<- arrayObj->clazz
    bl      dvmCanPutArrayElement       @ test object type vs. array type
    cmp     r0, #0                      @ okay?
    beq     common_errArrayStore        @ no
.LOP_APUT_OBJECT_skip_check:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IGET */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_finish:
    @bl      common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IGET_WIDE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_WIDE_finish:
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    mov     r2, rINST, lsr #8           @ r2<- A+
    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
    and     r2, r2, #15                 @ r2<- A
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IGET_OBJECT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_OBJECT_finish:
    @bl      common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IGET_BOOLEAN */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_BOOLEAN_finish:
    @bl      common_squeak1
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IGET_BYTE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_BYTE_finish:
    @bl      common_squeak2
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IGET_CHAR */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_CHAR_finish:
    @bl      common_squeak3
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IGET_SHORT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_SHORT_finish:
    @bl      common_squeak4
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_finish:
    @bl      common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_WIDE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_WIDE_finish:
    mov     r2, rINST, lsr #8           @ r2<- A+
    cmp     r9, #0                      @ check object for null
    and     r2, r2, #15                 @ r2<- A
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_OBJECT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_OBJECT_finish:
    @bl      common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_BOOLEAN */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_BOOLEAN_finish:
    @bl      common_squeak1
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_BYTE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_BYTE_finish:
    @bl      common_squeak2
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_CHAR */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_CHAR_finish:
    @bl      common_squeak3
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_SHORT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_SHORT_finish:
    @bl      common_squeak4
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SGET */

    /*
     * Continuation if the field has not yet been resolved.
 * r1: BBBB field ref
     */
.LOP_SGET_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_finish            @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_WIDE */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SGET_WIDE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_WIDE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_OBJECT */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SGET_OBJECT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_OBJECT_finish     @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_BOOLEAN */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SGET_BOOLEAN_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_BOOLEAN_finish    @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_BYTE */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SGET_BYTE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_BYTE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_CHAR */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SGET_CHAR_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_CHAR_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_SHORT */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SGET_SHORT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_SHORT_finish      @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SPUT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_finish            @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_WIDE */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     * r9: &fp[AA]  (preserved across the resolve call)
     */
.LOP_SPUT_WIDE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_WIDE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_OBJECT */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SPUT_OBJECT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_OBJECT_finish     @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_BOOLEAN */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SPUT_BOOLEAN_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_BOOLEAN_finish    @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_BYTE */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SPUT_BYTE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_BYTE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_CHAR */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SPUT_CHAR_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_CHAR_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_SHORT */

    /*
     * Continuation if the field has not yet been resolved.
     * r1: BBBB field ref
     */
.LOP_SPUT_SHORT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
bne     .LOP_SPUT_SHORT_finish      @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_INVOKE_VIRTUAL */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
     */
.LOP_INVOKE_VIRTUAL_continue:
    GET_VREG(r1, r10)                   @ r1<- "this" ptr
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    cmp     r1, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz  (comment fixed: dest is r3)
    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]  (comment fixed: dest is r0)
    bl      common_invokeMethodNoRange  @ continue on


/* continuation for OP_INVOKE_SUPER */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r9 = method->clazz
     */
.LOP_INVOKE_SUPER_continue:
    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_nsm       @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]  (comment fixed: dest is r0)
    bl      common_invokeMethodNoRange  @ continue on

.LOP_INVOKE_SUPER_resolve:
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_continue  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     *  r0 = resolved base method
     */
.LOP_INVOKE_SUPER_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod


/* continuation for OP_INVOKE_DIRECT */

    /*
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_finish   @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* continuation for OP_INVOKE_VIRTUAL_RANGE */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
     */
.LOP_INVOKE_VIRTUAL_RANGE_continue:
    GET_VREG(r1, r10)                   @ r1<- "this" ptr
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    cmp     r1, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz  (comment fixed: dest is r3)
    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]  (comment fixed: dest is r0)
    bl      common_invokeMethodRange    @ continue on


/* continuation for OP_INVOKE_SUPER_RANGE */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r9 = method->clazz
     */
.LOP_INVOKE_SUPER_RANGE_continue:
    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]  (comment fixed: dest is r0)
    bl      common_invokeMethodRange    @ continue on

.LOP_INVOKE_SUPER_RANGE_resolve:
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_RANGE_continue    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     *  r0 = resolved base method
     */
.LOP_INVOKE_SUPER_RANGE_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod


/* continuation for OP_INVOKE_DIRECT_RANGE */

    /*
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_RANGE_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* continuation for OP_FLOAT_TO_LONG */
/*
 * Convert the float in r0 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
f2l_doconv:
    stmfd   sp!, {r4, lr}
    mov     r1, #0x5f000000             @ (float)maxlong
    mov     r4, r0
    bl      __aeabi_fcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffff)
    mvnne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, #0xdf000000             @ (float)minlong
    bl      __aeabi_fcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (80000000)
    movne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, r4
    bl      __aeabi_fcmpeq              @ is arg == self?
cmp     r0, #0                      @ zero == no
    moveq   r1, #0                      @ return zero for NaN
    ldmeqfd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    bl      __aeabi_f2lz                @ convert float to long
    ldmfd   sp!, {r4, pc}


/* continuation for OP_DOUBLE_TO_LONG */
/*
 * Convert the double in r0/r1 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
d2l_doconv:
    stmfd   sp!, {r4, r5, lr}           @ save regs
    ldr     r3, .LOP_DOUBLE_TO_LONG_max @ (double)maxlong, hi
    sub     sp, sp, #4                  @ align for EABI
    mov     r2, #0                      @ (double)maxlong, lo
    mov     r4, r0                      @ save r0
    mov     r5, r1                      @  and r1
    bl      __aeabi_dcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
    mvnne   r1, #0x80000000
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    ldr     r3, .LOP_DOUBLE_TO_LONG_min @ (double)minlong, hi
    mov     r2, #0                      @ (double)minlong, lo
    bl      __aeabi_dcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (8000000000000000)
    movne   r1, #0x80000000
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    mov     r2, r4                      @ compare against self
    mov     r3, r5
    bl      __aeabi_dcmpeq              @ is arg == self?
    cmp     r0, #0                      @ zero == no
    moveq   r1, #0                      @ return zero for NaN
    beq     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    bl      __aeabi_d2lz                @ convert double to long

1:
    add     sp, sp, #4
    ldmfd   sp!, {r4, r5, pc}

.LOP_DOUBLE_TO_LONG_max:
    .word   0x43e00000                  @ maxlong, as a double (high word)
.LOP_DOUBLE_TO_LONG_min:
    .word   0xc3e00000                  @ minlong, as a double (high word)


/* continuation for OP_MUL_LONG */

.LOP_MUL_LONG_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHL_LONG */

.LOP_SHL_LONG_finish:
    mov     r0, r0, asl r2              @  r0<- r0 << r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHR_LONG */

.LOP_SHR_LONG_finish:
    mov     r1, r1, asr r2              @  r1<- r1 >> r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_USHR_LONG */

.LOP_USHR_LONG_finish:
    mov     r1, r1, lsr r2              @  r1<- r1 >>> r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHL_LONG_2ADDR */

.LOP_SHL_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHR_LONG_2ADDR */

.LOP_SHR_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_USHR_LONG_2ADDR */

.LOP_USHR_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_EXECUTE_INLINE */

    /*
     * Extract args, call function.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     *
     * Other ideas:
     * - Use a jump table from the main piece to jump directly into the
     *   AND/LDR pairs.  Costs a data load, saves a branch.
     * - Have five separate pieces that do the loading, so we can work the
     *   interleave a little better.  Increases code size.
     */
.LOP_EXECUTE_INLINE_continue:
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- FEDC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  and     ip, r9, #0xf000             @ isolate F
    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vF (shift right 12, left 2)
3:  and     ip, r9, #0x0f00             @ isolate E
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vE
2:  and     ip, r9, #0x00f0             @ isolate D
    ldr     r1, [rFP, ip, lsr #2]       @ r1<- vD
1:  and     ip, r9, #0x000f             @ isolate C
    ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
0:
    ldr     r9, .LOP_EXECUTE_INLINE_table       @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_table:
    .word   gDvmInlineOpsTable


    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
    .global dvmAsmSisterEnd
dvmAsmSisterEnd:

/* File: armv5te/footer.S */

/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */



    .text
    .align  2

#if defined(WITH_JIT)
/*
 * Return from the translation cache to the interpreter when the
compiler is
 * having issues translating/executing a Dalvik instruction.  We have to skip
 * the code cache lookup otherwise it is possible to indefinitely bounce
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    mov    rPC, r0
#ifdef EXIT_STATS
    mov    r0,lr
    bl     dvmBumpPunt;
#endif
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *    r0 <= PC
 *    r1 <= PC of resume instruction
 *    lr <= resume point in translation
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResume]
    str    r1,[rGLUE,#offGlue_jitResumePC]
    mov    r1,#kInterpEntryInstr
    @ enum is 4 byte in aapcs-EABI
    str    r1, [rGLUE, #offGlue_entryPoint]
    mov    rPC,r0
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    mov    r2,#kJitSingleStep     @ Ask for single step and then revert
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r1,#1                  @ set changeInterp to bail to debug interp
    b      common_gotoBail


/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.
 */
    .global dvmJitToTraceSelect
dvmJitToTraceSelect:
    ldr    rPC,[r14, #-1]           @ get our target PC
    add    rINST,r14,#-5            @ save start of chain branch
    mov    r0,rPC
    bl     dvmJitGetCodeAddr        @ Is there a translation?
    cmp    r0,#0
    beq    2f
    mov    r1,rINST
    bl     dvmJitChain              @ r0<- dvmJitChain(codeAddr,chainAddr)
    ldr    rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
    bx     r0                       @ continue native execution

/* No translation, so request one if profiling isn't disabled*/
2:
    adrl   rIBASE, dvmAsmInstructionStart
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp    r0, #0
    bne    common_selectTrace
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return from the translation cache to the interpreter.
 * The return was done with a BLX from thumb mode, and
 * the following 32-bit word contains the target rPC value.
 * Note that lr (r14) will have its low-order bit set to denote
 * its thumb-mode origin.
 *
 * We'll need to stash our lr origin away, recover the new
 * target and then check to see if there is a translation available
 * for our new target.  If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    rPC,[r14, #-1]           @ get our target PC
    add    rINST,r14,#-5            @ save start of chain branch
#ifdef EXIT_STATS
    bl     dvmBumpNormal
#endif
    mov    r0,rPC
    bl     dvmJitGetCodeAddr        @ Is there a translation?
    cmp    r0,#0
    beq    1f                       @ go if not, otherwise do chain
    mov    r1,rINST
    bl     dvmJitChain              @ r0<- dvmJitChain(codeAddr,chainAddr)
    ldr    rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
    bx     r0                       @ continue native execution

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
#ifdef EXIT_STATS
    bl     dvmBumpNoChain
#endif
    mov    r0,rPC
    bl     dvmJitGetCodeAddr        @ Is there a translation?
    cmp    r0,#0
    bxne   r0                       @ continue native execution if so

/*
 * No translation, restore interpreter regs and start interpreting.
 * rGLUE & rFP were preserved in the translated code, and rPC has
 * already been restored by the time we get here.  We'll need to set
 * up rIBASE & rINST, and load the address of the JitTable into r0.
 */
1:
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_JIT_PROF_TABLE(r0)
    @ NOTE: intended fallthrough
/*
 * Common code to update potential trace start counter, and initiate
 * a trace-build if appropriate.  On entry, rPC should point to the
 * next instruction to execute, and rINST should be already loaded with
 * the next opcode word, and r0 holds a pointer to the jit profile
 * table (pJitProfTable).
 */
common_testUpdateProfile:
    cmp    r0,#0
    GET_INST_OPCODE(ip)
    GOTO_OPCODE_IFEQ(ip)        @ if not profiling, fallthrough otherwise */

common_updateProfile:
    eor    r3,rPC,rPC,lsr #12   @ cheap, but fast hash function
    lsl    r3,r3,#23            @ shift out excess 511
    ldrb   r1,[r0,r3,lsr #23]   @ get counter
    GET_INST_OPCODE(ip)
    subs   r1,r1,#1             @ decrement counter
    strb   r1,[r0,r3,lsr #23]   @ and store it
    GOTO_OPCODE_IFNE(ip)        @ if not threshold, fallthrough otherwise */

/*
 * Here, we switch to the debug interpreter to request
 * trace selection.  First, though, check to see if there
 * is already a native translation in place (and, if so,
 * jump to it now).
 */
    mov    r1,#255
    strb   r1,[r0,r3,lsr #23]   @ reset counter
    EXPORT_PC()
    mov    r0,rPC
    bl     dvmJitGetCodeAddr    @ r0<- dvmJitGetCodeAddr(rPC)
    cmp    r0,#0
    ldrne  rINST, .LdvmCompilerTemplateStart @ rINST is rCBASE in compiled code
    beq    common_selectTrace
    bxne   r0                   @ jump to the translation
common_selectTrace:
    mov    r2,#kJitTSelectRequest   @ ask for trace selection
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r1,#1                @ set changeInterp
    b      common_gotoBail

.LdvmCompilerTemplateStart:
    .word  dvmCompilerTemplateStart

#endif

/*
 * Common code when a backward branch is taken.
 *
 * On entry:
 *  r9 is PC adjustment *in bytes*
 */
common_backwardBranch:
    mov     r0, #kInterpEntryInstr
    bl      common_periodicChecks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/*
 * Need to see if the thread needs to be suspended or debugger/profiler
 * activity has begun.
 *
 * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't
 * have to do the second ldr.
 *
 * TODO: reduce this so we're just checking a single location.
 *
 * On entry:
 *  r0 is reentry type, e.g.
kInterpEntryInstr 9218 * r9 is trampoline PC adjustment *in bytes* 9219 */ 9220common_periodicChecks: 9221 ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount 9222 9223#if defined(WITH_DEBUGGER) 9224 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 9225#endif 9226#if defined(WITH_PROFILER) 9227 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 9228#endif 9229 9230 ldr r3, [r3] @ r3<- suspendCount (int) 9231 9232#if defined(WITH_DEBUGGER) 9233 ldrb r1, [r1] @ r1<- debuggerActive (boolean) 9234#endif 9235#if defined (WITH_PROFILER) 9236 ldr r2, [r2] @ r2<- activeProfilers (int) 9237#endif 9238 9239 cmp r3, #0 @ suspend pending? 9240 bne 2f @ yes, do full suspension check 9241 9242#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER) 9243# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER) 9244 orrs r1, r1, r2 @ r1<- r1 | r2 9245 cmp r1, #0 @ debugger attached or profiler started? 9246# elif defined(WITH_DEBUGGER) 9247 cmp r1, #0 @ debugger attached? 9248# elif defined(WITH_PROFILER) 9249 cmp r2, #0 @ profiler started? 9250# endif 9251 bne 3f @ debugger/profiler, switch interp 9252#endif 9253 9254 bx lr @ nothing to do, return 9255 92562: @ check suspend 9257 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 9258 EXPORT_PC() @ need for precise GC 9259 b dvmCheckSuspendPending @ suspend if necessary, then return 9260 92613: @ debugger/profiler enabled, bail out 9262 add rPC, rPC, r9 @ update rPC 9263 str r0, [rGLUE, #offGlue_entryPoint] 9264 mov r1, #1 @ "want switch" = true 9265 b common_gotoBail 9266 9267 9268/* 9269 * The equivalent of "goto bail", this calls through the "bail handler". 9270 * 9271 * State registers will be saved to the "glue" area before bailing. 
9272 * 9273 * On entry: 9274 * r1 is "bool changeInterp", indicating if we want to switch to the 9275 * other interpreter or just bail all the way out 9276 */ 9277common_gotoBail: 9278 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 9279 mov r0, rGLUE @ r0<- glue ptr 9280 b dvmMterpStdBail @ call(glue, changeInterp) 9281 9282 @add r1, r1, #1 @ using (boolean+1) 9283 @add r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf 9284 @bl _longjmp @ does not return 9285 @bl common_abort 9286 9287 9288/* 9289 * Common code for method invocation with range. 9290 * 9291 * On entry: 9292 * r0 is "Method* methodToCall", the method we're trying to call 9293 */ 9294common_invokeMethodRange: 9295.LinvokeNewRange: 9296 @ prepare to copy args to "outs" area of current frame 9297 movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero 9298 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9299 beq .LinvokeArgsDone @ if no args, skip the rest 9300 FETCH(r1, 2) @ r1<- CCCC 9301 9302 @ r0=methodToCall, r1=CCCC, r2=count, r10=outs 9303 @ (very few methods have > 10 args; could unroll for common cases) 9304 add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC] 9305 sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args 9306 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 93071: ldr r1, [r3], #4 @ val = *fp++ 9308 subs r2, r2, #1 @ count-- 9309 str r1, [r10], #4 @ *outs++ = val 9310 bne 1b @ ...while count != 0 9311 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9312 b .LinvokeArgsDone 9313 9314/* 9315 * Common code for method invocation without range. 
9316 * 9317 * On entry: 9318 * r0 is "Method* methodToCall", the method we're trying to call 9319 */ 9320common_invokeMethodNoRange: 9321.LinvokeNewNoRange: 9322 @ prepare to copy args to "outs" area of current frame 9323 movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero 9324 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9325 FETCH(r1, 2) @ r1<- GFED (load here to hide latency) 9326 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 9327 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9328 beq .LinvokeArgsDone 9329 9330 @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs 9331.LinvokeNonRange: 9332 rsb r2, r2, #5 @ r2<- 5-r2 9333 add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each 9334 bl common_abort @ (skipped due to ARM prefetch) 93355: and ip, rINST, #0x0f00 @ isolate A 9336 ldr r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2) 9337 mov r0, r0 @ nop 9338 str r2, [r10, #-4]! @ *--outs = vA 93394: and ip, r1, #0xf000 @ isolate G 9340 ldr r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2) 9341 mov r0, r0 @ nop 9342 str r2, [r10, #-4]! @ *--outs = vG 93433: and ip, r1, #0x0f00 @ isolate F 9344 ldr r2, [rFP, ip, lsr #6] @ r2<- vF 9345 mov r0, r0 @ nop 9346 str r2, [r10, #-4]! @ *--outs = vF 93472: and ip, r1, #0x00f0 @ isolate E 9348 ldr r2, [rFP, ip, lsr #2] @ r2<- vE 9349 mov r0, r0 @ nop 9350 str r2, [r10, #-4]! @ *--outs = vE 93511: and ip, r1, #0x000f @ isolate D 9352 ldr r2, [rFP, ip, lsl #2] @ r2<- vD 9353 mov r0, r0 @ nop 9354 str r2, [r10, #-4]! 
                                        @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

/*
 * Shared frame-setup code for all interpreted invokes.  The args have
 * already been copied into the new frame's "outs" area by the range /
 * non-range code above.
 */
.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]   @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
@    bl      common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                      @ bottom < interpStackEnd?
    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
    blt     .LstackOverflow             @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0
    str     r9, [r10, #offStackSaveArea_returnAddr] @ interpreted caller: NULL
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE             @ native method? take JNI path
    bne     .LinvokeNative

    /*
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                        @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex]    @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                         @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex]    @ glue->methodClassDex = ...
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    cmp     r0,#0                           @ JIT profiling table present?
    bne     common_updateProfile
    GOTO_OPCODE(ip)                         @ jump to next instruction
#else
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    GOTO_OPCODE(ip)                         @ jump to next instruction
#endif

.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
    ldr     r9, [r3, #offThread_jniLocal_nextEntry] @ r9<- thread->refNext
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefTop] @newFp->localRefTop=refNext
    mov     r9, r3                      @ r9<- glue->self (preserve)

    @ r9/r10 are callee-saved under AAPCS, so they survive the call below
    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFp (points to args)
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type   dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

    @mov     lr, pc                      @ set return addr
    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    LDR_PC_LR "[r2, #offMethod_nativeFunc]"

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefTop] @ r0<- newSave->localRefTop
    ldr     r1, [r9, #offThread_exception]  @ check for exception
    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [r9, #offThread_jniLocal_nextEntry] @ self->refNext<- r0
    bne     common_exceptionThrown      @ exception pending: go handle it

    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LstackOverflow:
    @ the new frame would dip below interpStackEnd; report and throw
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
    bl      dvmHandleStackOverflow
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
#endif


    /*
     * Common code for method invocation, calling through "glue code".
     *
     * TODO: now that we have range and non-range invoke handlers, this
     *       needs to be split into two.  Maybe just create entry points
     *       that set r9 and jump here?
     *
     * On entry:
     *  r0 is "Method* methodToCall", the method we're trying to call
     *  r9 is "bool methodCallRange", indicating if this is a /range variant
     */
    .if     0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rGLUE                   @ A0<- glue
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
    .endif



/*
 * Common code for handling a return instruction.
 *
 * This does not return.
 */
common_returnFromMethod:
.LreturnNew:
    mov     r0, #kInterpEntryReturn
    mov     r9, #0
    bl      common_periodicChecks

    @ pop the current frame and recover the caller's saved state
    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     rFP, [r0, #offStackSaveArea_prevFrame]  @ fp = saveArea->prevFrame
    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ is this a break frame?
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    mov     r1, #0                      @ "want switch" = false
    beq     common_gotoBail             @ break frame, bail out completely

    PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
    ldr     r1, [r10, #offClassObject_pDvmDex]  @ r1<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
#if defined(WITH_JIT)
    ldr     r3, [r0, #offStackSaveArea_returnAddr]  @ r3 = saveArea->returnAddr
    GET_JIT_PROF_TABLE(r0)
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    cmp     r3, #0                      @ non-null returnAddr => compiled caller
    bne     1f                          @ return into compiled code
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     r0,#0                       @ JIT profiling table present?
    bne     common_updateProfile
    GOTO_OPCODE(ip)                     @ jump to next instruction
1:
    ldr     rINST, .LdvmCompilerTemplateStart   @ rINST is rCBASE in compiled code
    blx     r3
#else
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /*
     * Return handling, calls through "glue code".
     */
    .if     0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif


/*
 * Somebody has thrown an exception.  Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
    .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow
    mov     r9, #0
    bl      common_periodicChecks

#if defined(WITH_JIT)
    mov     r2,#kJitTSelectAbort        @ abandon trace selection in progress
    str     r2,[rGLUE,#offGlue_jitState]
#endif

    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10                     @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    mov     r3, #0                      @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false
    str     ip, [sp, #-4]!              @ *--sp = &fp
    ldr     r1, [rGLUE, #offGlue_method]    @ r1<- glue->method
    mov     r0, r10                     @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ save relPc result in rFP (temp)
    mov     r0, r10                     @ r0<- self
    bl      dvmCleanupStackOverflow     @ call(self)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack
    bmi     .LnotCaughtLocally          @ negative => no handler in this method

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method]  @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]  @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]  @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex]   @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1         @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex]    @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, r10                     @ if yes: r0<- self
    blne    dvmCleanupStackOverflow     @ if yes: call(self)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1                  @ r1<- offset in code units
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!              @ stack the line number (varargs)
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!              @ stack the source file name (varargs)
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
#endif
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    mov     r1, #0                      @ "want switch" = false
    b       common_gotoBail             @ bail out


    /*
     * Exception handling, calls through "glue code".
     */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Invalid array index.
 */
common_errArrayIndex:
    EXPORT_PC()
    ldr     r0, strArrayIndexException  @ "Ljava/lang/ArrayIndexOutOfBoundsException;"
    mov     r1, #0                      @ r1<- NULL
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invalid array value.
 */
common_errArrayStore:
    EXPORT_PC()
    ldr     r0, strArrayStoreException  @ "Ljava/lang/ArrayStoreException;"
    mov     r1, #0                      @ r1<- NULL
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Integer divide or mod by zero.
 */
common_errDivideByZero:
    EXPORT_PC()
    ldr     r0, strArithmeticException  @ "Ljava/lang/ArithmeticException;"
    ldr     r1, strDivideByZero         @ "divide by zero"
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Attempt to allocate an array with a negative size.
 */
common_errNegativeArraySize:
    EXPORT_PC()
    ldr     r0, strNegativeArraySizeException   @ "Ljava/lang/NegativeArraySizeException;"
    mov     r1, #0                      @ r1<- NULL
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invocation of a non-existent method.
 */
common_errNoSuchMethod:
    EXPORT_PC()
    ldr     r0, strNoSuchMethodError    @ "Ljava/lang/NoSuchMethodError;"
    mov     r1, #0                      @ r1<- NULL
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * We encountered a null object when we weren't expecting one.  We
 * export the PC, throw a NullPointerException, and goto the exception
 * processing code.
 */
common_errNullObject:
    EXPORT_PC()
    ldr     r0, strNullPointerException @ "Ljava/lang/NullPointerException;"
    mov     r1, #0                      @ r1<- NULL
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * For debugging, cause an immediate fault.  The source address will
 * be in lr (use a bl instruction to jump here).
 */
common_abort:
    ldr     pc, .LdeadFood              @ jump to 0xdeadf00d -> guaranteed fault
.LdeadFood:
    .word   0xdeadf00d

/*
 * Spit out a "we were here", preserving all registers.
 * (The attempt
 * to save ip won't work, but we need to save an even number of
 * registers for EABI 64-bit stack alignment.)
 */
    .macro  SQUEAK num
common_squeak\num:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strSqueak               @ "<%d>" format string
    mov     r1, #\num
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endm

    @ instantiate common_squeak0 .. common_squeak5
    SQUEAK  0
    SQUEAK  1
    SQUEAK  2
    SQUEAK  3
    SQUEAK  4
    SQUEAK  5

/*
 * Spit out the number in r0, preserving registers.
 */
common_printNum:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strSqueak               @ "<%d>" format string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print a newline, preserving registers.
 */
common_printNewline:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strNewline
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

    /*
     * Print the 32-bit quantity in r0 as a hex value, preserving registers.
     */
common_printHex:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strPrintHex             @ "<0x%x>" format string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print the 64-bit quantity in r0-r1, preserving registers.
 */
common_printLong:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r3, r1                      @ r2/r3 = low/high words of the long
    mov     r2, r0
    ldr     r0, strPrintLong            @ "<%lld>" format string
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print full method info.  Pass the Method* in r0.  Preserves regs.
 */
common_printMethod:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpPrintMethod
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Call a C helper function that dumps regs and possibly some
 * additional info.  Requires the C function to be compiled in.
 */
    .if     0
common_dumpRegs:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpDumpArmRegs
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endif


/*
 * String references, must be close to the code that uses them.
 * (Each entry is a word holding the address of the corresponding
 * .Lstr* literal in .rodata, loaded with a PC-relative ldr.)
 */
    .align  2
strArithmeticException:
    .word   .LstrArithmeticException
strArrayIndexException:
    .word   .LstrArrayIndexException
strArrayStoreException:
    .word   .LstrArrayStoreException
strDivideByZero:
    .word   .LstrDivideByZero
strNegativeArraySizeException:
    .word   .LstrNegativeArraySizeException
strNoSuchMethodError:
    .word   .LstrNoSuchMethodError
strNullPointerException:
    .word   .LstrNullPointerException

strLogTag:
    .word   .LstrLogTag
strExceptionNotCaughtLocally:
    .word   .LstrExceptionNotCaughtLocally

strNewline:
    .word   .LstrNewline
strSqueak:
    .word   .LstrSqueak
strPrintHex:
    .word   .LstrPrintHex
strPrintLong:
    .word   .LstrPrintLong

/*
 * Zero-terminated ASCII string data.
 *
 * On ARM we have two choices: do like gcc does, and LDR from a .word
 * with the address, or use an ADR pseudo-op to get the address
 * directly.  ADR saves 4 bytes and an indirection, but it's using a
 * PC-relative addressing mode and hence has a limited range, which
 * makes it not work well with mergeable string sections.
 */
    .section .rodata.str1.4,"aMS",%progbits,1

.LstrBadEntryPoint:
    .asciz  "Bad entry point %d\n"
.LstrArithmeticException:
    .asciz  "Ljava/lang/ArithmeticException;"
.LstrArrayIndexException:
    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
.LstrArrayStoreException:
    .asciz  "Ljava/lang/ArrayStoreException;"
.LstrClassCastException:
    .asciz  "Ljava/lang/ClassCastException;"
.LstrDivideByZero:
    .asciz  "divide by zero"
.LstrFilledNewArrayNotImpl:
    .asciz  "filled-new-array only implemented for objects and 'int'"
.LstrInternalError:
    .asciz  "Ljava/lang/InternalError;"
.LstrInstantiationError:
    .asciz  "Ljava/lang/InstantiationError;"
.LstrNegativeArraySizeException:
    .asciz  "Ljava/lang/NegativeArraySizeException;"
.LstrNoSuchMethodError:
    .asciz  "Ljava/lang/NoSuchMethodError;"
.LstrNullPointerException:
    .asciz  "Ljava/lang/NullPointerException;"

.LstrLogTag:
    .asciz  "mterp"
.LstrExceptionNotCaughtLocally:
    .asciz  "Exception %s from %s:%d not caught locally\n"

.LstrNewline:
    .asciz  "\n"
.LstrSqueak:
    .asciz  "<%d>"
.LstrPrintHex:
    .asciz  "<0x%x>"
.LstrPrintLong:
    .asciz  "<%lld>"