InterpAsm-armv7-a.S revision 7a0bcd0de6c4da6499a088a18d1750e51204c2a6
/*
 * This file was generated automatically by gen-mterp.py for 'armv7-a'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-a3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
/* NOTE: the ldmia/stmia forms rely on pc and fp being the first two
 * fields of MterpGlue (offset 0) -- they move both in one instruction */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 *
 * The "!" suffix writes the updated address back into rPC (pre-indexed
 * addressing with writeback), so the load and the PC advance are one insn.
 */
#define FETCH_ADVANCE_INST(_count) ldrh    rINST, [rPC, #(_count*2)]!

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
    ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh    rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]

/*
 * Put the instruction's opcode field (low 8 bits of rINST) into the
 * specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 *
 * Computed goto: each handler occupies a fixed 64-byte (1 << 6) slot
 * starting at rIBASE, so "pc = rIBASE + opcode * 64" dispatches directly.
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

#if defined(WITH_JIT)
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
        add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"


/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm


/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.
 * From this it figures out how to restore
 * the registers, including PC, for the previous stack frame.  If gdb
 * sees a non-function label, it stops scanning, so either we need to
 * have nothing but assembler-local labels between the entry point and
 * the break, or we need to fake it out.
 *
 * When this is defined, we add some stuff to make gdb less confused.
 */
#define ASSIST_DEBUGGER 1

    .text
    .align  2
    .global dvmMterpStdRun
    .type   dvmMterpStdRun, %function

/*
 * On entry:
 *  r0  MterpGlue* glue
 *
 * This function returns a boolean "changeInterp" value.  The return comes
 * via a call to dvmMterpStdBail().
 */
dvmMterpStdRun:
/* Prologue macros are shared with the fake dalvik_inst frame below, so the
 * unwind directives (.save/.pad) stay in sync with the actual pushes. */
#define MTERP_ENTRY1 \
    .save {r4-r10,fp,lr}; \
    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
#define MTERP_ENTRY2 \
    .pad    #4; \
    sub     sp, sp, #4                  @ align 64

    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2

    /* save stack pointer, add magic word for debuggerd */
    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return

    /* set up "named" registers, figure out entry point */
    mov     rGLUE, r0                   @ set rGLUE
    ldrb    r1, [r0, #offGlue_entryPoint]   @ InterpEntry enum is char
    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
    cmp     r1, #kInterpEntryInstr      @ usual case?
    bne     .Lnot_instr                 @ no, handle it

#if defined(WITH_JIT)
.Lno_singleStep:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    /* Entry is always a possible trace start */
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    mov     r1, #0                      @ prepare the value for the new state
    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
    cmp     r0,#0                       @ JIT profiling active?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    /* start executing the instruction at rPC */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.Lnot_instr:
    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
    beq     common_returnFromMethod

.Lnot_return:
    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
    beq     common_exceptionThrown

#if defined(WITH_JIT)
.Lnot_throw:
    ldr     r0,[rGLUE, #offGlue_jitResume]
    ldr     r2,[rGLUE, #offGlue_jitResumePC]
    cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
    bne     .Lbad_arg
    cmp     rPC,r2
    bne     .Lno_singleStep             @ must have branched, don't resume
    mov     r1, #kInterpEntryInstr
    strb    r1, [rGLUE, #offGlue_entryPoint]
    ldr     rINST, .LdvmCompilerTemplate
    bx      r0                          @ re-enter the translation
.LdvmCompilerTemplate:
    .word   dvmCompilerTemplateStart
#endif

.Lbad_arg:
    ldr     r0, strBadEntryPoint
    @ r1 holds value of entryPoint
    bl      printf
    bl      dvmAbort
    .fnend


    .global dvmMterpStdBail
    .type   dvmMterpStdBail, %function

/*
 * Restore the stack pointer and PC from the save point established on entry.
 * This is essentially the same as a longjmp, but should be cheaper.  The
 * last instruction causes us to return to whoever called dvmMterpStdRun.
 *
 * We pushed some registers on the stack in dvmMterpStdRun, then saved
 * SP and LR.
 * Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  r0  MterpGlue* glue
 *  r1  bool changeInterp
 */
dvmMterpStdBail:
    ldr     sp, [r0, #offGlue_bailPtr]  @ sp<- saved SP
    mov     r0, r1                      @ return the changeInterp value
    add     sp, sp, #4                  @ un-align 64
    LDMFD_PC "r4-r10,fp"                @ restore 9 regs and return


/*
 * String references.
 */
strBadEntryPoint:
    .word   .LstrBadEntryPoint


    .global dvmAsmInstructionStart
    .type   dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
    .text

/* ------------------------------ */
    .balign 64
.L_OP_NOP: /* 0x00 */
/* File: armv5te/OP_NOP.S */
    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    .type   dalvik_inst, %function
dalvik_inst:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
    .fnend
#endif

/* ------------------------------ */
    .balign 64
.L_OP_MOVE: /* 0x01 */
/* File: armv6t2/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_FROM16: /* 0x02 */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_16: /* 0x03 */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE: /* 0x04 */
/* File: armv6t2/OP_MOVE_WIDE.S */
    /* move-wide vA, vB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r2, rINST, #8, #4           @ r2<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
    /* move-wide/from16 vAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 1)                        @ r3<- BBBB
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_16: /* 0x06 */
/* File: armv5te/OP_MOVE_WIDE_16.S */
    /* move-wide/16 vAAAA, vBBBB */
    /* NOTE: regs can overlap, e.g.
"move v6,v7" or "move v7,v6" */ 503 FETCH(r3, 2) @ r3<- BBBB 504 FETCH(r2, 1) @ r2<- AAAA 505 add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] 506 add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA] 507 ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] 508 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 509 GET_INST_OPCODE(ip) @ extract opcode from rINST 510 stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1 511 GOTO_OPCODE(ip) @ jump to next instruction 512 513 514/* ------------------------------ */ 515 .balign 64 516.L_OP_MOVE_OBJECT: /* 0x07 */ 517/* File: armv5te/OP_MOVE_OBJECT.S */ 518/* File: armv5te/OP_MOVE.S */ 519 /* for move, move-object, long-to-int */ 520 /* op vA, vB */ 521 mov r1, rINST, lsr #12 @ r1<- B from 15:12 522 mov r0, rINST, lsr #8 @ r0<- A from 11:8 523 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 524 GET_VREG(r2, r1) @ r2<- fp[B] 525 and r0, r0, #15 526 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 527 SET_VREG(r2, r0) @ fp[A]<- r2 528 GOTO_OPCODE(ip) @ execute next instruction 529 530 531 532/* ------------------------------ */ 533 .balign 64 534.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */ 535/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */ 536/* File: armv5te/OP_MOVE_FROM16.S */ 537 /* for: move/from16, move-object/from16 */ 538 /* op vAA, vBBBB */ 539 FETCH(r1, 1) @ r1<- BBBB 540 mov r0, rINST, lsr #8 @ r0<- AA 541 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 542 GET_VREG(r2, r1) @ r2<- fp[BBBB] 543 GET_INST_OPCODE(ip) @ extract opcode from rINST 544 SET_VREG(r2, r0) @ fp[AA]<- r2 545 GOTO_OPCODE(ip) @ jump to next instruction 546 547 548 549/* ------------------------------ */ 550 .balign 64 551.L_OP_MOVE_OBJECT_16: /* 0x09 */ 552/* File: armv5te/OP_MOVE_OBJECT_16.S */ 553/* File: armv5te/OP_MOVE_16.S */ 554 /* for: move/16, move-object/16 */ 555 /* op vAAAA, vBBBB */ 556 FETCH(r1, 2) @ r1<- BBBB 557 FETCH(r0, 1) @ r0<- AAAA 558 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 559 GET_VREG(r2, r1) @ r2<- fp[BBBB] 560 GET_INST_OPCODE(ip) @ extract opcode from rINST 561 SET_VREG(r2, r0) @ 
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT: /* 0x0a */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
    /* move-result-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_EXCEPTION: /* 0x0d */
/* File: armv5te/OP_MOVE_EXCEPTION.S */
    /* move-exception vAA */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    mov     r2, rINST, lsr #8           @ r2<- AA
    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
    mov     r1, #0                      @ r1<- 0
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_VOID: /* 0x0e */
/* File: armv5te/OP_RETURN_VOID.S */
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN: /* 0x0f */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_WIDE: /* 0x10 */
/* File: armv5te/OP_RETURN_WIDE.S */
    /*
     * Return a 64-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     */
    /* return-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
    stmia   r3, {r0-r1}                 @ retval<- r0/r1
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_OBJECT: /* 0x11 */
/* File: armv5te/OP_RETURN_OBJECT.S */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_CONST_4: /* 0x12 */
/* File: armv6t2/OP_CONST_4.S */
    /* const/4 vA, #+B */
    mov     r1, rINST, lsl #16          @ r1<- Bxxx0000
    ubfx    r0, rINST, #8, #4           @ r0<- A
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r1, asr #28             @ r1<- sssssssB (sign-extended)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r1, r0)                    @ fp[A]<- r1
    GOTO_OPCODE(ip)                     @ execute next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_16: /* 0x13 */
/* File: armv5te/OP_CONST_16.S */
    /* const/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST: /* 0x14 */
/* File: armv5te/OP_CONST.S */
    /* const vAA, #+BBBBbbbb */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_HIGH16: /* 0x15 */
/* File: armv5te/OP_CONST_HIGH16.S */
    /* const/high16 vAA, #+BBBB0000 */
    FETCH(r0, 1)                        @ r0<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, r0, lsl #16             @ r0<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_16: /* 0x16 */
/* File: armv5te/OP_CONST_WIDE_16.S */
    /* const-wide/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r1, r0, asr #31             @ r1<- ssssssss (sign-fill high word)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_32: /* 0x17 */
/* File: armv5te/OP_CONST_WIDE_32.S */
    /* const-wide/32 vAA, #+BBBBbbbb */
    FETCH(r0, 1)                        @ r0<- 0000bbbb (low)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_S(r2, 2)                      @ r2<- ssssBBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    mov     r1, r0, asr #31             @ r1<- ssssssss (sign-fill high word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE: /* 0x18 */
/* File: armv5te/OP_CONST_WIDE.S */
    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (low middle)
    FETCH(r2, 3)                        @ r2<- hhhh (high middle)
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
    FETCH(r3, 4)                        @ r3<- HHHH (high)
    mov     r9, rINST, lsr #8           @ r9<- AA
    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------
 */
    .balign 64
.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
    /* const-wide/high16 vAA, #+BBBB000000000000 */
    FETCH(r1, 1)                        @ r1<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, #0                      @ r0<- 00000000
    mov     r1, r1, lsl #16             @ r1<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING: /* 0x1a */
/* File: armv5te/OP_CONST_STRING.S */
    /* const/string vAA, String@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_resolve   @ slow path: resolve the string ref
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING_JUMBO: /* 0x1b */
/* File: armv5te/OP_CONST_STRING_JUMBO.S */
    /* const/string vAA, String@BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_JUMBO_resolve
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_CLASS: /* 0x1c */
/* File: armv5te/OP_CONST_CLASS.S */
    /* const/class vAA, Class@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- dvmDex->pResClasses
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_CLASS_resolve    @ slow path: resolve the class ref
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_ENTER: /* 0x1d */
/* File: armv5te/OP_MONITOR_ENTER.S */
    /*
     * Synchronize on an object.
     */
    /* monitor-enter vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
    beq     common_errNullObject        @ null object, throw an exception
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      dvmLockObject               @ call(self, obj)
#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r1, [r0, #offThread_exception] @ check for exception
    cmp     r1, #0
    bne     common_exceptionThrown      @ exception raised, bail out
#endif
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_EXIT: /* 0x1e */
/* File: armv5te/OP_MONITOR_EXIT.S */
    /*
     * Unlock an object.
     *
     * Exceptions that occur when unlocking a monitor need to appear as
     * if they happened at the following instruction.  See the Dalvik
     * instruction spec.
     */
    /* monitor-exit vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    EXPORT_PC()                         @ before fetch: export the PC
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, exception is pending
    FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CHECK_CAST: /* 0x1f */
/* File: armv5te/OP_CHECK_CAST.S */
    /*
     * Check to see if a cast from one class to another is allowed.
     */
    /* check-cast vAA, class@BBBB */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r2, 1)                        @ r2<- BBBB
    GET_VREG(r9, r3)                    @ r9<- object
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
    cmp     r9, #0                      @ is object null?
    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
    beq     .LOP_CHECK_CAST_okay        @ null obj, cast always succeeds
    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_CHECK_CAST_resolve     @ not resolved, do it now
.LOP_CHECK_CAST_resolved:
    cmp     r0, r1                      @ same class (trivial success)?
    bne     .LOP_CHECK_CAST_fullcheck   @ no, do full check
.LOP_CHECK_CAST_okay:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INSTANCE_OF: /* 0x20 */
/* File: armv5te/OP_INSTANCE_OF.S */
    /*
     * Check to see if an object reference is an instance of a class.
     *
     * Most common situation is a non-null object, being compared against
     * an already-resolved class.
     */
    /* instance-of vA, vB, class@CCCC */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (opcode bits still attached)
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    and     r9, r9, #15                 @ r9<- A
    cmp     r0, #0                      @ is object null?
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
    beq     .LOP_INSTANCE_OF_store      @ null obj, not an instance, store r0
    FETCH(r3, 1)                        @ r3<- CCCC
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_INSTANCE_OF_resolve    @ not resolved, do it now
.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
    cmp     r0, r1                      @ same class (trivial success)?
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

/* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv6t2/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    /* array-length vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r2, rINST, #8, #4           @ r2<- A
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vA<- length
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved: @ r0=class
    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

/* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


/* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /*
     * Fill an array from the data table embedded in the code stream
     * at PC + BBBBbbbb*2 (handled by dvmInterpHandleFillArrayData).
     */
    /* fill-array-data vAA, +BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
    EXPORT_PC();
    bl      dvmInterpHandleFillArrayData@ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    mov     r9, r9, lsl #1              @ r9<- byte offset
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: arm-vfp/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default; covers < and NaN)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_FLOAT_finish      @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: arm-vfp/OP_CMPG_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default; covers > and NaN)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_FLOAT_finish      @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: arm-vfp/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default; covers < and NaN)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_DOUBLE_finish     @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: arm-vfp/OP_CMPG_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default; covers > and NaN)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_DOUBLE_finish     @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less          @ low words differ and not higher: less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

/* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv6t2/OP_IF_EQ.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv6t2/OP_IF_NE.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: armv6t2/OP_IF_LT.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: armv6t2/OP_IF_GE.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: armv6t2/OP_IF_GT.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_LE: /* 0x37 */
/* File: armv6t2/OP_IF_LE.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bgt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_EQZ: /* 0x38 */
/* File: armv5te/OP_IF_EQZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_NEZ: /* 0x39 */
/* File: armv5te/OP_IF_NEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_LTZ: /* 0x3a */
/* File: armv5te/OP_IF_LTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GEZ: /* 0x3b */
/* File: armv5te/OP_IF_GEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GTZ: /* 0x3c */
/* File: armv5te/OP_IF_GTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_LEZ: /* 0x3d */
/* File: armv5te/OP_IF_LEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
1908 * 1909 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1910 */ 1911 /* if-cmp vAA, +BBBB */ 1912 mov r0, rINST, lsr #8 @ r0<- AA 1913 GET_VREG(r2, r0) @ r2<- vAA 1914 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1915 cmp r2, #0 @ compare (vA, 0) 1916 bgt 1f @ branch to 1 if comparison failed 1917 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1918 movs r9, r9, asl #1 @ convert to bytes, check sign 1919 bmi common_backwardBranch @ backward branch, do periodic checks 19201: 1921#if defined(WITH_JIT) 1922 GET_JIT_PROF_TABLE(r0) 1923 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1924 cmp r0,#0 1925 bne common_updateProfile 1926 GET_INST_OPCODE(ip) @ extract opcode from rINST 1927 GOTO_OPCODE(ip) @ jump to next instruction 1928#else 1929 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1930 GET_INST_OPCODE(ip) @ extract opcode from rINST 1931 GOTO_OPCODE(ip) @ jump to next instruction 1932#endif 1933 1934 1935 1936/* ------------------------------ */ 1937 .balign 64 1938.L_OP_UNUSED_3E: /* 0x3e */ 1939/* File: armv5te/OP_UNUSED_3E.S */ 1940/* File: armv5te/unused.S */ 1941 bl common_abort 1942 1943 1944 1945/* ------------------------------ */ 1946 .balign 64 1947.L_OP_UNUSED_3F: /* 0x3f */ 1948/* File: armv5te/OP_UNUSED_3F.S */ 1949/* File: armv5te/unused.S */ 1950 bl common_abort 1951 1952 1953 1954/* ------------------------------ */ 1955 .balign 64 1956.L_OP_UNUSED_40: /* 0x40 */ 1957/* File: armv5te/OP_UNUSED_40.S */ 1958/* File: armv5te/unused.S */ 1959 bl common_abort 1960 1961 1962 1963/* ------------------------------ */ 1964 .balign 64 1965.L_OP_UNUSED_41: /* 0x41 */ 1966/* File: armv5te/OP_UNUSED_41.S */ 1967/* File: armv5te/unused.S */ 1968 bl common_abort 1969 1970 1971 1972/* ------------------------------ */ 1973 .balign 64 1974.L_OP_UNUSED_42: /* 0x42 */ 1975/* File: armv5te/OP_UNUSED_42.S */ 1976/* File: armv5te/unused.S */ 1977 bl common_abort 1978 1979 1980 1981/* ------------------------------ */ 1982 .balign 64 
.L_OP_UNUSED_43: /* 0x43 */
/* File: armv5te/OP_UNUSED_43.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM


/* ------------------------------ */
    .balign 64
.L_OP_AGET: /* 0x44 */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: armv5te/OP_AGET_WIDE.S */
    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .LOP_AGET_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_AGET_OBJECT: /* 0x46 */
/* File: armv5te/OP_AGET_OBJECT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_AGET_BOOLEAN: /* 0x47 */
/* File: armv5te/OP_AGET_BOOLEAN.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrb    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_AGET_BYTE: /* 0x48 */
/* File: armv5te/OP_AGET_BYTE.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_AGET_CHAR: /* 0x49 */
/* File: armv5te/OP_AGET_CHAR.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrh    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_AGET_SHORT: /* 0x4a */
/* File: armv5te/OP_AGET_SHORT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_APUT: /* 0x4b */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_WIDE: /* 0x4c */
/* File: armv5te/OP_APUT_WIDE.S */
    /*
     * Array put, 64 bits.  vBB[vCC] <- vAA.
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
     */
    /* aput-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    bcc     .LOP_APUT_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_APUT_OBJECT: /* 0x4d */
/* File: armv5te/OP_APUT_OBJECT.S */
    /*
     * Store an object into an array.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r1, r2)                    @ r1<- vBB (array object)
    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
    cmp     r1, #0                      @ null array object?
    GET_VREG(r9, r9)                    @ r9<- vAA
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r1, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r10, r1, r0, lsl #2         @ r10<- arrayObj + index*width
    cmp     r0, r3                      @ compare unsigned index, length
    bcc     .LOP_APUT_OBJECT_finish     @ we're okay, continue on
    b       common_errArrayIndex        @ index >= length, bail


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BOOLEAN: /* 0x4e */
/* File: armv5te/OP_APUT_BOOLEAN.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_APUT_BYTE: /* 0x4f */
/* File: armv5te/OP_APUT_BYTE.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_APUT_CHAR: /* 0x50 */
/* File: armv5te/OP_APUT_CHAR.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_APUT_SHORT: /* 0x51 */
/* File: armv5te/OP_APUT_SHORT.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction



/* ------------------------------ */
    .balign 64
.L_OP_IGET: /* 0x52 */
/* File: armv6t2/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: armv6t2/OP_IGET_WIDE.S */
    /*
     * Wide (64-bit) instance field get.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_WIDE_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: armv5te/OP_IGET_OBJECT.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_OBJECT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: armv5te/OP_IGET_BOOLEAN.S */
@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BOOLEAN_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BYTE: /* 0x56 */
/* File: armv5te/OP_IGET_BYTE.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BYTE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_CHAR: /* 0x57 */
/* File: armv5te/OP_IGET_CHAR.S */
@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_CHAR_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_SHORT: /* 0x58 */
/* File: armv5te/OP_IGET_SHORT.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_SHORT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT: /* 0x59 */
/* File: armv6t2/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_finish            @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE: /* 0x5a */
/* File: armv6t2/OP_IPUT_WIDE.S */
    /* iput-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_WIDE_finish       @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT: /* 0x5b */
/* File: armv5te/OP_IPUT_OBJECT.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_OBJECT_finish     @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BOOLEAN: /* 0x5c */
/* File: armv5te/OP_IPUT_BOOLEAN.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BOOLEAN_finish    @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BYTE: /* 0x5d */
/* File: armv5te/OP_IPUT_BYTE.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BYTE_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_CHAR: /* 0x5e */
/* File: armv5te/OP_IPUT_CHAR.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_CHAR_finish       @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_SHORT: /* 0x5f */
/* File: armv5te/OP_IPUT_SHORT.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_SHORT_finish      @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET: /* 0x60 */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_resolve           @ yes, do resolve
.LOP_SGET_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_WIDE: /* 0x61 */
/* File: armv5te/OP_SGET_WIDE.S */
    /*
     * 64-bit SGET handler.
     */
    /* sget-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_WIDE_resolve      @ yes, do resolve
.LOP_SGET_WIDE_finish:
    mov     r1, rINST, lsr #8           @ r1<- AA
    ldrd    r2, [r0, #offStaticField_value] @ r2/r3<- field value (aligned)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    stmia   r1, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_OBJECT: /* 0x62 */
/* File: armv5te/OP_SGET_OBJECT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_OBJECT_resolve    @ yes, do resolve
.LOP_SGET_OBJECT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_BOOLEAN: /* 0x63 */
/* File: armv5te/OP_SGET_BOOLEAN.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BOOLEAN_resolve   @ yes, do resolve
.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_BYTE: /* 0x64 */
/* File: armv5te/OP_SGET_BYTE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BYTE_resolve      @ yes, do resolve
.LOP_SGET_BYTE_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_CHAR: /* 0x65 */
/* File: armv5te/OP_SGET_CHAR.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_CHAR_resolve      @ yes, do resolve
.LOP_SGET_CHAR_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SGET_SHORT: /* 0x66 */
/* File: armv5te/OP_SGET_SHORT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_SHORT_resolve     @ yes, do resolve
.LOP_SGET_SHORT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT: /* 0x67 */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_resolve           @ yes, do resolve
.LOP_SPUT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_WIDE: /* 0x68 */
/* File: armv5te/OP_SPUT_WIDE.S */
    /*
     * 64-bit SPUT handler.
     */
    /* sput-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_WIDE_resolve      @ yes, do resolve
.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strd    r2, [r0, #offStaticField_value] @ field<- vAA/vAA+1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_OBJECT: /* 0x69 */
/* File: armv5te/OP_SPUT_OBJECT.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    @ NOTE(review): this stores an object reference with a plain str, same
    @ as the scalar OP_SPUT — no GC write-barrier/card-mark is visible in
    @ this handler; confirm the collector configuration does not require one.
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_OBJECT_resolve    @ yes, do resolve
.LOP_SPUT_OBJECT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BOOLEAN: /* 0x6a */
/* File: armv5te/OP_SPUT_BOOLEAN.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BOOLEAN_resolve   @ yes, do resolve
.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BYTE: /* 0x6b */
/* File: armv5te/OP_SPUT_BYTE.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BYTE_resolve      @ yes, do resolve
.LOP_SPUT_BYTE_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_CHAR: /* 0x6c */
/* File: armv5te/OP_SPUT_CHAR.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_CHAR_resolve      @ yes, do resolve
.LOP_SPUT_CHAR_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_SHORT: /* 0x6d */
/* File: armv5te/OP_SPUT_SHORT.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_SHORT_resolve     @ yes, do resolve
.LOP_SPUT_SHORT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL: /* 0x6e */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    .if (!0)                            @ non-range flavor: (!0) is true, mask applied
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_continue    @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_continue    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER: /* 0x6f */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if (!0)                            @ non-range flavor: (!0) is true, mask applied
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_continue  @ resolved, continue on
    b       .LOP_INVOKE_SUPER_resolve   @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT: /* 0x70 */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoiding loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if (!0)                            @ non-range flavor: (!0) is true, mask applied
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_resolve  @ not resolved, do it now
.LOP_INVOKE_DIRECT_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodNoRange  @ no, continue on
    b       common_errNullObject        @ yes, throw exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC: /* 0x71 */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodNoRange  @ yes, continue on
0:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodNoRange  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE: /* 0x72 */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if (!0)                            @ non-range flavor: (!0) is true, mask applied
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodNoRange  @ jump to common handler


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_73: /* 0x73 */
/* File: armv5te/OP_UNUSED_73.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    .if (!1)                            @ range flavor: (!1) is false, mask skipped
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue  @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if (!1)                            @ range flavor: (!1) is false, mask skipped
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method]    @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_RANGE_continue    @ resolved, continue on
    b       .LOP_INVOKE_SUPER_RANGE_resolve     @ do resolve now


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoiding loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if (!1)                            @ range flavor: (!1) is false, mask skipped
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_RANGE_resolve    @ not resolved, do it now
.LOP_INVOKE_DIRECT_RANGE_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodRange    @ no, continue on
    b       common_errNullObject        @ yes, throw exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodRange    @ yes, continue on
0:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodRange    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if (!1)                            @ range flavor: (!1) is false, mask skipped
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodRange    @ jump to common handler


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_79: /* 0x79 */
/* File: armv5te/OP_UNUSED_79.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_7A: /* 0x7a */
/* File: armv5te/OP_UNUSED_7A.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_NEG_INT: /* 0x7b */
/* File: armv6t2/OP_NEG_INT.S */
/* File: armv6t2/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsb     r0, r0, #0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 8-9 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NOT_INT: /* 0x7c */
/* File: armv6t2/OP_NOT_INT.S */
/* File: armv6t2/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 8-9 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_LONG: /* 0x7d */
/* File: armv6t2/OP_NEG_LONG.S */
/* File: armv6t2/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsbs    r0, r0, #0                  @ optional op; may set condition codes
    rsc     r1, r1, #0                  @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NOT_LONG: /* 0x7e */
/* File: armv6t2/OP_NOT_LONG.S */
/* File: armv6t2/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ optional op; may set condition codes
    mvn     r1, r1                      @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_FLOAT: /* 0x7f */
/* File: armv6t2/OP_NEG_FLOAT.S */
/* File: armv6t2/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    add     r0, r0, #0x80000000         @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 8-9 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_NEG_DOUBLE: /* 0x80 */
/* File: armv6t2/OP_NEG_DOUBLE.S */
/* File: armv6t2/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    add     r1, r1, #0x80000000         @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_LONG: /* 0x81 */
/* File: armv6t2/OP_INT_TO_LONG.S */
/* File: armv6t2/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r0, asr #31             @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_FLOAT: /* 0x82 */
/* File: arm-vfp/OP_INT_TO_FLOAT.S */
/* File: arm-vfp/funop.S */
    /*
     * Generic 32-bit unary floating-point operation.  Provide an "instr"
     * line that specifies an instruction that performs "s1 = op s0".
     *
     * for: int-to-float, float-to-int
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fsitos  s1, s0                      @ s1<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s1, [r9]                    @ vA<- s1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_DOUBLE: /* 0x83 */
/* File: arm-vfp/OP_INT_TO_DOUBLE.S */
/* File: arm-vfp/funopWider.S */
    /*
     * Generic 32bit-to-64bit floating point unary operation.  Provide an
     * "instr" line that specifies an instruction that performs "d0 = op s0".
     *
     * For: int-to-double, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fsitod  d0, s0                      @ d0<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fstd    d0, [r9]                    @ vA<- d0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_LONG_TO_INT: /* 0x84 */
/* File: armv5te/OP_LONG_TO_INT.S */
/* we ignore the high word, making this equivalent to a 32-bit reg move */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15                 @ r0<- A (strip opcode bits)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @
execute next instruction 3757 3758 3759 3760/* ------------------------------ */ 3761 .balign 64 3762.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3763/* File: armv6t2/OP_LONG_TO_FLOAT.S */ 3764/* File: armv6t2/unopNarrower.S */ 3765 /* 3766 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3767 * that specifies an instruction that performs "result = op r0/r1", where 3768 * "result" is a 32-bit quantity in r0. 3769 * 3770 * For: long-to-float, double-to-int, double-to-float 3771 * 3772 * (This would work for long-to-int, but that instruction is actually 3773 * an exact match for OP_MOVE.) 3774 */ 3775 /* unop vA, vB */ 3776 mov r3, rINST, lsr #12 @ r3<- B 3777 ubfx r9, rINST, #8, #4 @ r9<- A 3778 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3779 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3780 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3781 @ optional op; may set condition codes 3782 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3783 GET_INST_OPCODE(ip) @ extract opcode from rINST 3784 SET_VREG(r0, r9) @ vA<- r0 3785 GOTO_OPCODE(ip) @ jump to next instruction 3786 /* 9-10 instructions */ 3787 3788 3789/* ------------------------------ */ 3790 .balign 64 3791.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3792/* File: armv6t2/OP_LONG_TO_DOUBLE.S */ 3793/* File: armv6t2/unopWide.S */ 3794 /* 3795 * Generic 64-bit unary operation. Provide an "instr" line that 3796 * specifies an instruction that performs "result = op r0/r1". 3797 * This could be an ARM instruction or a function call. 
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    bl      __aeabi_l2d                 @ r0/r1<- op, r2-r3 changed (r9 survives the call per EABI)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_FLOAT_TO_INT: /* 0x87 */
/* File: arm-vfp/OP_FLOAT_TO_INT.S */
/* File: arm-vfp/funop.S */
    /*
     * Generic 32-bit unary floating-point operation.  Provide an "instr"
     * line that specifies an instruction that performs "s1 = op s0".
     *
     * for: int-to-float, float-to-int
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (high nibble still present)
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    ftosizs s1, s0                      @ s1<- op (float -> int, round toward zero)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s1, [r9]                    @ vA<- s1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_FLOAT_TO_LONG: /* 0x88 */
/* File: armv6t2/OP_FLOAT_TO_LONG.S */
@include "armv6t2/unopWider.S" {"instr":"bl __aeabi_f2lz"}
/* File: armv6t2/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      f2l_doconv                  @ r0<- op, r0-r3 changed
                                        @ NOTE(review): @include names __aeabi_f2lz but the
                                        @ generated body calls f2l_doconv (defined elsewhere
                                        @ in this file) -- presumably a Java-semantics wrapper;
                                        @ confirm against the handler footer.
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
/* File: arm-vfp/OP_FLOAT_TO_DOUBLE.S */
/* File: arm-vfp/funopWider.S */
    /*
     * Generic 32bit-to-64bit floating point unary operation.  Provide an
     * "instr" line that specifies an instruction that performs "d0 = op s0".
     *
     * For: int-to-double, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (high nibble still present)
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fcvtds  d0, s0                      @ d0<- op (float -> double)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fstd    d0, [r9]                    @ vA<- d0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_INT: /* 0x8a */
/* File: arm-vfp/OP_DOUBLE_TO_INT.S */
/* File: arm-vfp/funopNarrower.S */
    /*
     * Generic 64bit-to-32bit unary floating point operation.  Provide an
     * "instr" line that specifies an instruction that performs "s0 = op d0".
     *
     * For: double-to-int, double-to-float
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (high nibble still present)
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    fldd    d0, [r3]                    @ d0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    ftosizd s0, d0                      @ s0<- op (double -> int, round toward zero)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s0, [r9]                    @ vA<- s0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_LONG: /* 0x8b */
/* File: armv6t2/OP_DOUBLE_TO_LONG.S */
@include "armv6t2/unopWide.S" {"instr":"bl __aeabi_d2lz"}
/* File: armv6t2/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    bl      d2l_doconv                  @ r0/r1<- op, r2-r3 changed
                                        @ NOTE(review): @include names __aeabi_d2lz but the
                                        @ generated body calls d2l_doconv (defined elsewhere
                                        @ in this file); confirm against the handler footer.
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
/* File: arm-vfp/OP_DOUBLE_TO_FLOAT.S */
/* File: arm-vfp/funopNarrower.S */
    /*
     * Generic 64bit-to-32bit unary floating point operation.  Provide an
     * "instr" line that specifies an instruction that performs "s0 = op d0".
     *
     * For: double-to-int, double-to-float
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+ (high nibble still present)
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    fldd    d0, [r3]                    @ d0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fcvtsd  s0, d0                      @ s0<- op (double -> float)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s0, [r9]                    @ vA<- s0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_BYTE: /* 0x8d */
/* File: armv6t2/OP_INT_TO_BYTE.S */
/* File: armv6t2/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    sxtb    r0, r0                      @ r0<- op, r0-r3 changed (sign-extend low byte)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 8-9 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_CHAR: /* 0x8e */
/* File: armv6t2/OP_INT_TO_CHAR.S */
/* File: armv6t2/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    uxth    r0, r0                      @ r0<- op, r0-r3 changed (zero-extend: char is unsigned)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 8-9 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_SHORT: /* 0x8f */
/* File: armv6t2/OP_INT_TO_SHORT.S */
/* File: armv6t2/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    sxth    r0, r0                      @ r0<- op, r0-r3 changed (sign-extend low halfword)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 8-9 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT: /* 0x90 */
/* File: armv5te/OP_ADD_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero disabled by template expansion
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT: /* 0x91 */
/* File: armv5te/OP_SUB_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero disabled by template expansion
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT: /* 0x92 */
/* File: armv5te/OP_MUL_INT.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero disabled by template expansion
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed (Rd != Rm constraint; see note above)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT: /* 0x93 */
/* File: armv5te/OP_DIV_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 1                               @ chkzero enabled: division must throw on vCC == 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed (r9 survives the call per EABI)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT: /* 0x94 */
/* File: armv5te/OP_REM_INT.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 1                               @ chkzero enabled: modulus must throw on vCC == 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed (remainder comes back in r1)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT: /* 0x95 */
/* File: armv5te/OP_AND_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero disabled by template expansion
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT: /* 0x96 */
/* File: armv5te/OP_OR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero disabled by template expansion
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT: /* 0x97 */
/* File: armv5te/OP_XOR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero disabled by template expansion
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT: /* 0x98 */
/* File: armv5te/OP_SHL_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero disabled by template expansion
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; may set condition codes (mask shift count per Java semantics)
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT: /* 0x99 */
/* File: armv5te/OP_SHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero disabled by template expansion
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; may set condition codes (mask shift count per Java semantics)
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed (arithmetic shift: sign-propagating)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT: /* 0x9a */
/* File: armv5te/OP_USHR_INT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0                               @ chkzero disabled by template expansion
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r1, r1, #31                 @ optional op; may set condition codes (mask shift count per Java semantics)
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed (logical shift: zero-filling)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_LONG: /* 0x9b */
/* File: armv5te/OP_ADD_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero disabled by template expansion
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    adds    r0, r0, r2                  @ optional op; may set condition codes
    adc     r1, r1, r3                  @ result<- op, r0-r3 changed (carry into high word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG: /* 0x9c */
/* File: armv5te/OP_SUB_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0                               @ chkzero disabled by template expansion
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ optional op; may set condition codes
    sbc     r1, r1, r3                  @ result<- op, r0-r3 changed (borrow from high word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG: /* 0x9d */
/* File: armv5te/OP_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same.
     */
    /* mul-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST, lsr #8           @ r0<- AA
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    add     r0, rFP, r0, lsl #2         @ r0<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_MUL_LONG_finish       @ store r9/r10 to vAA (out-of-line tail)

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG: /* 0x9e */
/* File: armv5te/OP_DIV_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1 (quotient)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG: /* 0x9f */
/* File: armv5te/OP_REM_LONG.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vAA/vAA+1<- r2/r3 (remainder)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG: /* 0xa0 */
/* File: armv5te/OP_AND_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ optional op; may set condition codes
    and     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG: /* 0xa1 */
/* File: armv5te/OP_OR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ optional op; may set condition codes
    orr     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG: /* 0xa2 */
/* File: armv5te/OP_XOR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG: /* 0xa3 */
/* File: armv5te/OP_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish       @ low word + store done out of line

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG: /* 0xa4 */
/* File: armv5te/OP_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2 (low word is logical shift)
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHR_LONG_finish       @ high word + store done out of line

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG: /* 0xa5 */
/* File: armv5te/OP_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* ushr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_USHR_LONG_finish      @ high word + store done out of line

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT: /* 0xa6 */
/* File: arm-vfp/OP_ADD_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fadds   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT: /* 0xa7 */
/* File: arm-vfp/OP_SUB_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.
     * Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT: /* 0xa8 */
/* File: arm-vfp/OP_MUL_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuls   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT: /* 0xa9 */
/* File: arm-vfp/OP_DIV_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT: /* 0xaa */
/* File: armv5te/OP_REM_FLOAT.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE: /* 0xab */
/* File: arm-vfp/OP_ADD_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    faddd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE: /* 0xac */
/* File: arm-vfp/OP_SUB_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE: /* 0xad */
/* File: arm-vfp/OP_MUL_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuld   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE: /* 0xae */
/* File: arm-vfp/OP_DIV_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE: /* 0xaf */
/* File: armv5te/OP_REM_DOUBLE.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_2ADDR: /* 0xb0 */
/* File: armv6t2/OP_ADD_INT_2ADDR.S */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT_2ADDR: /* 0xb1 */
/* File: armv6t2/OP_SUB_INT_2ADDR.S */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_2ADDR: /* 0xb2 */
/* File: armv6t2/OP_MUL_INT_2ADDR.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed (Rd != Rm)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_2ADDR: /* 0xb3 */
/* File: armv6t2/OP_DIV_INT_2ADDR.S */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_2ADDR: /* 0xb4 */
/* File: armv6t2/OP_REM_INT_2ADDR.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1 (remainder)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_2ADDR: /* 0xb5 */
/* File: armv6t2/OP_AND_INT_2ADDR.S */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
5511 beq common_errDivideByZero 5512 .endif 5513 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5514 5515 @ optional op; may set condition codes 5516 and r0, r0, r1 @ r0<- op, r0-r3 changed 5517 GET_INST_OPCODE(ip) @ extract opcode from rINST 5518 SET_VREG(r0, r9) @ vAA<- r0 5519 GOTO_OPCODE(ip) @ jump to next instruction 5520 /* 10-13 instructions */ 5521 5522 5523 5524/* ------------------------------ */ 5525 .balign 64 5526.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5527/* File: armv6t2/OP_OR_INT_2ADDR.S */ 5528/* File: armv6t2/binop2addr.S */ 5529 /* 5530 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5531 * that specifies an instruction that performs "result = r0 op r1". 5532 * This could be an ARM instruction or a function call. (If the result 5533 * comes back in a register other than r0, you can override "result".) 5534 * 5535 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5536 * vCC (r1). Useful for integer division and modulus. 5537 * 5538 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5539 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5540 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5541 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5542 */ 5543 /* binop/2addr vA, vB */ 5544 mov r3, rINST, lsr #12 @ r3<- B 5545 ubfx r9, rINST, #8, #4 @ r9<- A 5546 GET_VREG(r1, r3) @ r1<- vB 5547 GET_VREG(r0, r9) @ r0<- vA 5548 .if 0 5549 cmp r1, #0 @ is second operand zero? 
5550 beq common_errDivideByZero 5551 .endif 5552 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5553 5554 @ optional op; may set condition codes 5555 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5556 GET_INST_OPCODE(ip) @ extract opcode from rINST 5557 SET_VREG(r0, r9) @ vAA<- r0 5558 GOTO_OPCODE(ip) @ jump to next instruction 5559 /* 10-13 instructions */ 5560 5561 5562 5563/* ------------------------------ */ 5564 .balign 64 5565.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5566/* File: armv6t2/OP_XOR_INT_2ADDR.S */ 5567/* File: armv6t2/binop2addr.S */ 5568 /* 5569 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5570 * that specifies an instruction that performs "result = r0 op r1". 5571 * This could be an ARM instruction or a function call. (If the result 5572 * comes back in a register other than r0, you can override "result".) 5573 * 5574 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5575 * vCC (r1). Useful for integer division and modulus. 5576 * 5577 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5578 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5579 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5580 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5581 */ 5582 /* binop/2addr vA, vB */ 5583 mov r3, rINST, lsr #12 @ r3<- B 5584 ubfx r9, rINST, #8, #4 @ r9<- A 5585 GET_VREG(r1, r3) @ r1<- vB 5586 GET_VREG(r0, r9) @ r0<- vA 5587 .if 0 5588 cmp r1, #0 @ is second operand zero? 
5589 beq common_errDivideByZero 5590 .endif 5591 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5592 5593 @ optional op; may set condition codes 5594 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5595 GET_INST_OPCODE(ip) @ extract opcode from rINST 5596 SET_VREG(r0, r9) @ vAA<- r0 5597 GOTO_OPCODE(ip) @ jump to next instruction 5598 /* 10-13 instructions */ 5599 5600 5601 5602/* ------------------------------ */ 5603 .balign 64 5604.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5605/* File: armv6t2/OP_SHL_INT_2ADDR.S */ 5606/* File: armv6t2/binop2addr.S */ 5607 /* 5608 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5609 * that specifies an instruction that performs "result = r0 op r1". 5610 * This could be an ARM instruction or a function call. (If the result 5611 * comes back in a register other than r0, you can override "result".) 5612 * 5613 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5614 * vCC (r1). Useful for integer division and modulus. 5615 * 5616 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5617 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5618 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5619 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5620 */ 5621 /* binop/2addr vA, vB */ 5622 mov r3, rINST, lsr #12 @ r3<- B 5623 ubfx r9, rINST, #8, #4 @ r9<- A 5624 GET_VREG(r1, r3) @ r1<- vB 5625 GET_VREG(r0, r9) @ r0<- vA 5626 .if 0 5627 cmp r1, #0 @ is second operand zero? 
5628 beq common_errDivideByZero 5629 .endif 5630 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5631 5632 and r1, r1, #31 @ optional op; may set condition codes 5633 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5634 GET_INST_OPCODE(ip) @ extract opcode from rINST 5635 SET_VREG(r0, r9) @ vAA<- r0 5636 GOTO_OPCODE(ip) @ jump to next instruction 5637 /* 10-13 instructions */ 5638 5639 5640 5641/* ------------------------------ */ 5642 .balign 64 5643.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5644/* File: armv6t2/OP_SHR_INT_2ADDR.S */ 5645/* File: armv6t2/binop2addr.S */ 5646 /* 5647 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5648 * that specifies an instruction that performs "result = r0 op r1". 5649 * This could be an ARM instruction or a function call. (If the result 5650 * comes back in a register other than r0, you can override "result".) 5651 * 5652 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5653 * vCC (r1). Useful for integer division and modulus. 5654 * 5655 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5656 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5657 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5658 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5659 */ 5660 /* binop/2addr vA, vB */ 5661 mov r3, rINST, lsr #12 @ r3<- B 5662 ubfx r9, rINST, #8, #4 @ r9<- A 5663 GET_VREG(r1, r3) @ r1<- vB 5664 GET_VREG(r0, r9) @ r0<- vA 5665 .if 0 5666 cmp r1, #0 @ is second operand zero? 
5667 beq common_errDivideByZero 5668 .endif 5669 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5670 5671 and r1, r1, #31 @ optional op; may set condition codes 5672 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5673 GET_INST_OPCODE(ip) @ extract opcode from rINST 5674 SET_VREG(r0, r9) @ vAA<- r0 5675 GOTO_OPCODE(ip) @ jump to next instruction 5676 /* 10-13 instructions */ 5677 5678 5679 5680/* ------------------------------ */ 5681 .balign 64 5682.L_OP_USHR_INT_2ADDR: /* 0xba */ 5683/* File: armv6t2/OP_USHR_INT_2ADDR.S */ 5684/* File: armv6t2/binop2addr.S */ 5685 /* 5686 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5687 * that specifies an instruction that performs "result = r0 op r1". 5688 * This could be an ARM instruction or a function call. (If the result 5689 * comes back in a register other than r0, you can override "result".) 5690 * 5691 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5692 * vCC (r1). Useful for integer division and modulus. 5693 * 5694 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5695 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5696 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5697 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5698 */ 5699 /* binop/2addr vA, vB */ 5700 mov r3, rINST, lsr #12 @ r3<- B 5701 ubfx r9, rINST, #8, #4 @ r9<- A 5702 GET_VREG(r1, r3) @ r1<- vB 5703 GET_VREG(r0, r9) @ r0<- vA 5704 .if 0 5705 cmp r1, #0 @ is second operand zero? 
5706 beq common_errDivideByZero 5707 .endif 5708 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5709 5710 and r1, r1, #31 @ optional op; may set condition codes 5711 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5712 GET_INST_OPCODE(ip) @ extract opcode from rINST 5713 SET_VREG(r0, r9) @ vAA<- r0 5714 GOTO_OPCODE(ip) @ jump to next instruction 5715 /* 10-13 instructions */ 5716 5717 5718 5719/* ------------------------------ */ 5720 .balign 64 5721.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 5722/* File: armv6t2/OP_ADD_LONG_2ADDR.S */ 5723/* File: armv6t2/binopWide2addr.S */ 5724 /* 5725 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5726 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5727 * This could be an ARM instruction or a function call. (If the result 5728 * comes back in a register other than r0, you can override "result".) 5729 * 5730 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5731 * vCC (r1). Useful for integer division and modulus. 5732 * 5733 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5734 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5735 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5736 * rem-double/2addr 5737 */ 5738 /* binop/2addr vA, vB */ 5739 mov r1, rINST, lsr #12 @ r1<- B 5740 ubfx r9, rINST, #8, #4 @ r9<- A 5741 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5742 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5743 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5744 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5745 .if 0 5746 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5747 beq common_errDivideByZero 5748 .endif 5749 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5750 5751 adds r0, r0, r2 @ optional op; may set condition codes 5752 adc r1, r1, r3 @ result<- op, r0-r3 changed 5753 GET_INST_OPCODE(ip) @ extract opcode from rINST 5754 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5755 GOTO_OPCODE(ip) @ jump to next instruction 5756 /* 12-15 instructions */ 5757 5758 5759 5760/* ------------------------------ */ 5761 .balign 64 5762.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 5763/* File: armv6t2/OP_SUB_LONG_2ADDR.S */ 5764/* File: armv6t2/binopWide2addr.S */ 5765 /* 5766 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5767 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5768 * This could be an ARM instruction or a function call. (If the result 5769 * comes back in a register other than r0, you can override "result".) 5770 * 5771 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5772 * vCC (r1). Useful for integer division and modulus. 5773 * 5774 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5775 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5776 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5777 * rem-double/2addr 5778 */ 5779 /* binop/2addr vA, vB */ 5780 mov r1, rINST, lsr #12 @ r1<- B 5781 ubfx r9, rINST, #8, #4 @ r9<- A 5782 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5783 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5784 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5785 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5786 .if 0 5787 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5788 beq common_errDivideByZero 5789 .endif 5790 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5791 5792 subs r0, r0, r2 @ optional op; may set condition codes 5793 sbc r1, r1, r3 @ result<- op, r0-r3 changed 5794 GET_INST_OPCODE(ip) @ extract opcode from rINST 5795 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5796 GOTO_OPCODE(ip) @ jump to next instruction 5797 /* 12-15 instructions */ 5798 5799 5800 5801/* ------------------------------ */ 5802 .balign 64 5803.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 5804/* File: armv6t2/OP_MUL_LONG_2ADDR.S */ 5805 /* 5806 * Signed 64-bit integer multiply, "/2addr" version. 5807 * 5808 * See OP_MUL_LONG for an explanation. 5809 * 5810 * We get a little tight on registers, so to avoid looking up &fp[A] 5811 * again we stuff it into rINST. 5812 */ 5813 /* mul-long/2addr vA, vB */ 5814 mov r1, rINST, lsr #12 @ r1<- B 5815 ubfx r9, rINST, #8, #4 @ r9<- A 5816 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5817 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 5818 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5819 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 5820 mul ip, r2, r1 @ ip<- ZxW 5821 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 5822 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 5823 mov r0, rINST @ r0<- &fp[A] (free up rINST) 5824 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5825 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 5826 GET_INST_OPCODE(ip) @ extract opcode from rINST 5827 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 5828 GOTO_OPCODE(ip) @ jump to next instruction 5829 5830 5831/* ------------------------------ */ 5832 .balign 64 5833.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 5834/* File: armv6t2/OP_DIV_LONG_2ADDR.S */ 5835/* File: armv6t2/binopWide2addr.S */ 5836 /* 5837 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5838 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5839 * This could be an ARM instruction or a function call. (If the result 5840 * comes back in a register other than r0, you can override "result".) 
5841 * 5842 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5843 * vCC (r1). Useful for integer division and modulus. 5844 * 5845 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5846 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5847 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5848 * rem-double/2addr 5849 */ 5850 /* binop/2addr vA, vB */ 5851 mov r1, rINST, lsr #12 @ r1<- B 5852 ubfx r9, rINST, #8, #4 @ r9<- A 5853 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5854 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5855 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5856 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5857 .if 1 5858 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5859 beq common_errDivideByZero 5860 .endif 5861 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5862 5863 @ optional op; may set condition codes 5864 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5865 GET_INST_OPCODE(ip) @ extract opcode from rINST 5866 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5867 GOTO_OPCODE(ip) @ jump to next instruction 5868 /* 12-15 instructions */ 5869 5870 5871 5872/* ------------------------------ */ 5873 .balign 64 5874.L_OP_REM_LONG_2ADDR: /* 0xbf */ 5875/* File: armv6t2/OP_REM_LONG_2ADDR.S */ 5876/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 5877/* File: armv6t2/binopWide2addr.S */ 5878 /* 5879 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5880 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5881 * This could be an ARM instruction or a function call. (If the result 5882 * comes back in a register other than r0, you can override "result".) 5883 * 5884 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5885 * vCC (r1). Useful for integer division and modulus. 
5886 * 5887 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5888 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5889 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5890 * rem-double/2addr 5891 */ 5892 /* binop/2addr vA, vB */ 5893 mov r1, rINST, lsr #12 @ r1<- B 5894 ubfx r9, rINST, #8, #4 @ r9<- A 5895 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5896 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5897 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5898 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5899 .if 1 5900 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5901 beq common_errDivideByZero 5902 .endif 5903 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5904 5905 @ optional op; may set condition codes 5906 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5907 GET_INST_OPCODE(ip) @ extract opcode from rINST 5908 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 5909 GOTO_OPCODE(ip) @ jump to next instruction 5910 /* 12-15 instructions */ 5911 5912 5913 5914/* ------------------------------ */ 5915 .balign 64 5916.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 5917/* File: armv6t2/OP_AND_LONG_2ADDR.S */ 5918/* File: armv6t2/binopWide2addr.S */ 5919 /* 5920 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5921 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5922 * This could be an ARM instruction or a function call. (If the result 5923 * comes back in a register other than r0, you can override "result".) 5924 * 5925 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5926 * vCC (r1). Useful for integer division and modulus. 
5927 * 5928 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5929 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5930 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5931 * rem-double/2addr 5932 */ 5933 /* binop/2addr vA, vB */ 5934 mov r1, rINST, lsr #12 @ r1<- B 5935 ubfx r9, rINST, #8, #4 @ r9<- A 5936 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5937 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5938 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5939 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5940 .if 0 5941 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5942 beq common_errDivideByZero 5943 .endif 5944 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5945 5946 and r0, r0, r2 @ optional op; may set condition codes 5947 and r1, r1, r3 @ result<- op, r0-r3 changed 5948 GET_INST_OPCODE(ip) @ extract opcode from rINST 5949 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5950 GOTO_OPCODE(ip) @ jump to next instruction 5951 /* 12-15 instructions */ 5952 5953 5954 5955/* ------------------------------ */ 5956 .balign 64 5957.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 5958/* File: armv6t2/OP_OR_LONG_2ADDR.S */ 5959/* File: armv6t2/binopWide2addr.S */ 5960 /* 5961 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5962 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5963 * This could be an ARM instruction or a function call. (If the result 5964 * comes back in a register other than r0, you can override "result".) 5965 * 5966 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5967 * vCC (r1). Useful for integer division and modulus. 
5968 * 5969 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5970 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5971 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5972 * rem-double/2addr 5973 */ 5974 /* binop/2addr vA, vB */ 5975 mov r1, rINST, lsr #12 @ r1<- B 5976 ubfx r9, rINST, #8, #4 @ r9<- A 5977 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5978 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5979 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5980 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5981 .if 0 5982 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5983 beq common_errDivideByZero 5984 .endif 5985 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5986 5987 orr r0, r0, r2 @ optional op; may set condition codes 5988 orr r1, r1, r3 @ result<- op, r0-r3 changed 5989 GET_INST_OPCODE(ip) @ extract opcode from rINST 5990 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5991 GOTO_OPCODE(ip) @ jump to next instruction 5992 /* 12-15 instructions */ 5993 5994 5995 5996/* ------------------------------ */ 5997 .balign 64 5998.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 5999/* File: armv6t2/OP_XOR_LONG_2ADDR.S */ 6000/* File: armv6t2/binopWide2addr.S */ 6001 /* 6002 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6003 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6004 * This could be an ARM instruction or a function call. (If the result 6005 * comes back in a register other than r0, you can override "result".) 6006 * 6007 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6008 * vCC (r1). Useful for integer division and modulus. 
6009 * 6010 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6011 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6012 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6013 * rem-double/2addr 6014 */ 6015 /* binop/2addr vA, vB */ 6016 mov r1, rINST, lsr #12 @ r1<- B 6017 ubfx r9, rINST, #8, #4 @ r9<- A 6018 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6019 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6020 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6021 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6022 .if 0 6023 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6024 beq common_errDivideByZero 6025 .endif 6026 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6027 6028 eor r0, r0, r2 @ optional op; may set condition codes 6029 eor r1, r1, r3 @ result<- op, r0-r3 changed 6030 GET_INST_OPCODE(ip) @ extract opcode from rINST 6031 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6032 GOTO_OPCODE(ip) @ jump to next instruction 6033 /* 12-15 instructions */ 6034 6035 6036 6037/* ------------------------------ */ 6038 .balign 64 6039.L_OP_SHL_LONG_2ADDR: /* 0xc3 */ 6040/* File: armv6t2/OP_SHL_LONG_2ADDR.S */ 6041 /* 6042 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6043 * 32-bit shift distance. 
6044 */ 6045 /* shl-long/2addr vA, vB */ 6046 mov r3, rINST, lsr #12 @ r3<- B 6047 ubfx r9, rINST, #8, #4 @ r9<- A 6048 GET_VREG(r2, r3) @ r2<- vB 6049 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6050 and r2, r2, #63 @ r2<- r2 & 0x3f 6051 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6052 6053 mov r1, r1, asl r2 @ r1<- r1 << r2 6054 rsb r3, r2, #32 @ r3<- 32 - r2 6055 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 6056 subs ip, r2, #32 @ ip<- r2 - 32 6057 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6058 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 6059 mov r0, r0, asl r2 @ r0<- r0 << r2 6060 b .LOP_SHL_LONG_2ADDR_finish 6061 6062/* ------------------------------ */ 6063 .balign 64 6064.L_OP_SHR_LONG_2ADDR: /* 0xc4 */ 6065/* File: armv6t2/OP_SHR_LONG_2ADDR.S */ 6066 /* 6067 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6068 * 32-bit shift distance. 6069 */ 6070 /* shr-long/2addr vA, vB */ 6071 mov r3, rINST, lsr #12 @ r3<- B 6072 ubfx r9, rINST, #8, #4 @ r9<- A 6073 GET_VREG(r2, r3) @ r2<- vB 6074 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6075 and r2, r2, #63 @ r2<- r2 & 0x3f 6076 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6077 6078 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6079 rsb r3, r2, #32 @ r3<- 32 - r2 6080 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6081 subs ip, r2, #32 @ ip<- r2 - 32 6082 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6083 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 6084 mov r1, r1, asr r2 @ r1<- r1 >> r2 6085 b .LOP_SHR_LONG_2ADDR_finish 6086 6087/* ------------------------------ */ 6088 .balign 64 6089.L_OP_USHR_LONG_2ADDR: /* 0xc5 */ 6090/* File: armv6t2/OP_USHR_LONG_2ADDR.S */ 6091 /* 6092 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6093 * 32-bit shift distance. 
6094 */ 6095 /* ushr-long/2addr vA, vB */ 6096 mov r3, rINST, lsr #12 @ r3<- B 6097 ubfx r9, rINST, #8, #4 @ r9<- A 6098 GET_VREG(r2, r3) @ r2<- vB 6099 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6100 and r2, r2, #63 @ r2<- r2 & 0x3f 6101 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6102 6103 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6104 rsb r3, r2, #32 @ r3<- 32 - r2 6105 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6106 subs ip, r2, #32 @ ip<- r2 - 32 6107 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6108 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 6109 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 6110 b .LOP_USHR_LONG_2ADDR_finish 6111 6112/* ------------------------------ */ 6113 .balign 64 6114.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */ 6115/* File: arm-vfp/OP_ADD_FLOAT_2ADDR.S */ 6116/* File: arm-vfp/fbinop2addr.S */ 6117 /* 6118 * Generic 32-bit floating point "/2addr" binary operation. Provide 6119 * an "instr" line that specifies an instruction that performs 6120 * "s2 = s0 op s1". 6121 * 6122 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6123 */ 6124 /* binop/2addr vA, vB */ 6125 mov r3, rINST, lsr #12 @ r3<- B 6126 mov r9, rINST, lsr #8 @ r9<- A+ 6127 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6128 and r9, r9, #15 @ r9<- A 6129 flds s1, [r3] @ s1<- vB 6130 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6131 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6132 flds s0, [r9] @ s0<- vA 6133 6134 fadds s2, s0, s1 @ s2<- op 6135 GET_INST_OPCODE(ip) @ extract opcode from rINST 6136 fsts s2, [r9] @ vAA<- s2 6137 GOTO_OPCODE(ip) @ jump to next instruction 6138 6139 6140/* ------------------------------ */ 6141 .balign 64 6142.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */ 6143/* File: arm-vfp/OP_SUB_FLOAT_2ADDR.S */ 6144/* File: arm-vfp/fbinop2addr.S */ 6145 /* 6146 * Generic 32-bit floating point "/2addr" binary operation. Provide 6147 * an "instr" line that specifies an instruction that performs 6148 * "s2 = s0 op s1". 
6149 * 6150 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6151 */ 6152 /* binop/2addr vA, vB */ 6153 mov r3, rINST, lsr #12 @ r3<- B 6154 mov r9, rINST, lsr #8 @ r9<- A+ 6155 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6156 and r9, r9, #15 @ r9<- A 6157 flds s1, [r3] @ s1<- vB 6158 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6159 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6160 flds s0, [r9] @ s0<- vA 6161 6162 fsubs s2, s0, s1 @ s2<- op 6163 GET_INST_OPCODE(ip) @ extract opcode from rINST 6164 fsts s2, [r9] @ vAA<- s2 6165 GOTO_OPCODE(ip) @ jump to next instruction 6166 6167 6168/* ------------------------------ */ 6169 .balign 64 6170.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */ 6171/* File: arm-vfp/OP_MUL_FLOAT_2ADDR.S */ 6172/* File: arm-vfp/fbinop2addr.S */ 6173 /* 6174 * Generic 32-bit floating point "/2addr" binary operation. Provide 6175 * an "instr" line that specifies an instruction that performs 6176 * "s2 = s0 op s1". 6177 * 6178 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6179 */ 6180 /* binop/2addr vA, vB */ 6181 mov r3, rINST, lsr #12 @ r3<- B 6182 mov r9, rINST, lsr #8 @ r9<- A+ 6183 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6184 and r9, r9, #15 @ r9<- A 6185 flds s1, [r3] @ s1<- vB 6186 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6187 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6188 flds s0, [r9] @ s0<- vA 6189 6190 fmuls s2, s0, s1 @ s2<- op 6191 GET_INST_OPCODE(ip) @ extract opcode from rINST 6192 fsts s2, [r9] @ vAA<- s2 6193 GOTO_OPCODE(ip) @ jump to next instruction 6194 6195 6196/* ------------------------------ */ 6197 .balign 64 6198.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */ 6199/* File: arm-vfp/OP_DIV_FLOAT_2ADDR.S */ 6200/* File: arm-vfp/fbinop2addr.S */ 6201 /* 6202 * Generic 32-bit floating point "/2addr" binary operation. Provide 6203 * an "instr" line that specifies an instruction that performs 6204 * "s2 = s0 op s1". 
6205 * 6206 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6207 */ 6208 /* binop/2addr vA, vB */ 6209 mov r3, rINST, lsr #12 @ r3<- B 6210 mov r9, rINST, lsr #8 @ r9<- A+ 6211 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6212 and r9, r9, #15 @ r9<- A 6213 flds s1, [r3] @ s1<- vB 6214 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6215 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6216 flds s0, [r9] @ s0<- vA 6217 6218 fdivs s2, s0, s1 @ s2<- op 6219 GET_INST_OPCODE(ip) @ extract opcode from rINST 6220 fsts s2, [r9] @ vAA<- s2 6221 GOTO_OPCODE(ip) @ jump to next instruction 6222 6223 6224/* ------------------------------ */ 6225 .balign 64 6226.L_OP_REM_FLOAT_2ADDR: /* 0xca */ 6227/* File: armv6t2/OP_REM_FLOAT_2ADDR.S */ 6228/* EABI doesn't define a float remainder function, but libm does */ 6229/* File: armv6t2/binop2addr.S */ 6230 /* 6231 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 6232 * that specifies an instruction that performs "result = r0 op r1". 6233 * This could be an ARM instruction or a function call. (If the result 6234 * comes back in a register other than r0, you can override "result".) 6235 * 6236 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6237 * vCC (r1). Useful for integer division and modulus. 6238 * 6239 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 6240 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 6241 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 6242 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 6243 */ 6244 /* binop/2addr vA, vB */ 6245 mov r3, rINST, lsr #12 @ r3<- B 6246 ubfx r9, rINST, #8, #4 @ r9<- A 6247 GET_VREG(r1, r3) @ r1<- vB 6248 GET_VREG(r0, r9) @ r0<- vA 6249 .if 0 6250 cmp r1, #0 @ is second operand zero? 
6251 beq common_errDivideByZero 6252 .endif 6253 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6254 6255 @ optional op; may set condition codes 6256 bl fmodf @ r0<- op, r0-r3 changed 6257 GET_INST_OPCODE(ip) @ extract opcode from rINST 6258 SET_VREG(r0, r9) @ vAA<- r0 6259 GOTO_OPCODE(ip) @ jump to next instruction 6260 /* 10-13 instructions */ 6261 6262 6263 6264/* ------------------------------ */ 6265 .balign 64 6266.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */ 6267/* File: arm-vfp/OP_ADD_DOUBLE_2ADDR.S */ 6268/* File: arm-vfp/fbinopWide2addr.S */ 6269 /* 6270 * Generic 64-bit floating point "/2addr" binary operation. Provide 6271 * an "instr" line that specifies an instruction that performs 6272 * "d2 = d0 op d1". 6273 * 6274 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6275 * div-double/2addr 6276 */ 6277 /* binop/2addr vA, vB */ 6278 mov r3, rINST, lsr #12 @ r3<- B 6279 mov r9, rINST, lsr #8 @ r9<- A+ 6280 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6281 and r9, r9, #15 @ r9<- A 6282 fldd d1, [r3] @ d1<- vB 6283 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6284 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6285 fldd d0, [r9] @ d0<- vA 6286 6287 faddd d2, d0, d1 @ d2<- op 6288 GET_INST_OPCODE(ip) @ extract opcode from rINST 6289 fstd d2, [r9] @ vAA<- d2 6290 GOTO_OPCODE(ip) @ jump to next instruction 6291 6292 6293/* ------------------------------ */ 6294 .balign 64 6295.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */ 6296/* File: arm-vfp/OP_SUB_DOUBLE_2ADDR.S */ 6297/* File: arm-vfp/fbinopWide2addr.S */ 6298 /* 6299 * Generic 64-bit floating point "/2addr" binary operation. Provide 6300 * an "instr" line that specifies an instruction that performs 6301 * "d2 = d0 op d1". 
6302 * 6303 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6304 * div-double/2addr 6305 */ 6306 /* binop/2addr vA, vB */ 6307 mov r3, rINST, lsr #12 @ r3<- B 6308 mov r9, rINST, lsr #8 @ r9<- A+ 6309 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6310 and r9, r9, #15 @ r9<- A 6311 fldd d1, [r3] @ d1<- vB 6312 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6313 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6314 fldd d0, [r9] @ d0<- vA 6315 6316 fsubd d2, d0, d1 @ d2<- op 6317 GET_INST_OPCODE(ip) @ extract opcode from rINST 6318 fstd d2, [r9] @ vAA<- d2 6319 GOTO_OPCODE(ip) @ jump to next instruction 6320 6321 6322/* ------------------------------ */ 6323 .balign 64 6324.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */ 6325/* File: arm-vfp/OP_MUL_DOUBLE_2ADDR.S */ 6326/* File: arm-vfp/fbinopWide2addr.S */ 6327 /* 6328 * Generic 64-bit floating point "/2addr" binary operation. Provide 6329 * an "instr" line that specifies an instruction that performs 6330 * "d2 = d0 op d1". 6331 * 6332 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6333 * div-double/2addr 6334 */ 6335 /* binop/2addr vA, vB */ 6336 mov r3, rINST, lsr #12 @ r3<- B 6337 mov r9, rINST, lsr #8 @ r9<- A+ 6338 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6339 and r9, r9, #15 @ r9<- A 6340 fldd d1, [r3] @ d1<- vB 6341 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6342 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6343 fldd d0, [r9] @ d0<- vA 6344 6345 fmuld d2, d0, d1 @ d2<- op 6346 GET_INST_OPCODE(ip) @ extract opcode from rINST 6347 fstd d2, [r9] @ vAA<- d2 6348 GOTO_OPCODE(ip) @ jump to next instruction 6349 6350 6351/* ------------------------------ */ 6352 .balign 64 6353.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */ 6354/* File: arm-vfp/OP_DIV_DOUBLE_2ADDR.S */ 6355/* File: arm-vfp/fbinopWide2addr.S */ 6356 /* 6357 * Generic 64-bit floating point "/2addr" binary operation. Provide 6358 * an "instr" line that specifies an instruction that performs 6359 * "d2 = d0 op d1". 
6360 * 6361 * For: add-double/2addr, sub-double/2addr, mul-double/2addr, 6362 * div-double/2addr 6363 */ 6364 /* binop/2addr vA, vB */ 6365 mov r3, rINST, lsr #12 @ r3<- B 6366 mov r9, rINST, lsr #8 @ r9<- A+ 6367 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6368 and r9, r9, #15 @ r9<- A 6369 fldd d1, [r3] @ d1<- vB 6370 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6371 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6372 fldd d0, [r9] @ d0<- vA 6373 6374 fdivd d2, d0, d1 @ d2<- op 6375 GET_INST_OPCODE(ip) @ extract opcode from rINST 6376 fstd d2, [r9] @ vAA<- d2 6377 GOTO_OPCODE(ip) @ jump to next instruction 6378 6379 6380/* ------------------------------ */ 6381 .balign 64 6382.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */ 6383/* File: armv6t2/OP_REM_DOUBLE_2ADDR.S */ 6384/* EABI doesn't define a double remainder function, but libm does */ 6385/* File: armv6t2/binopWide2addr.S */ 6386 /* 6387 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6388 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6389 * This could be an ARM instruction or a function call. (If the result 6390 * comes back in a register other than r0, you can override "result".) 6391 * 6392 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6393 * vCC (r1). Useful for integer division and modulus. 6394 * 6395 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6396 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6397 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6398 * rem-double/2addr 6399 */ 6400 /* binop/2addr vA, vB */ 6401 mov r1, rINST, lsr #12 @ r1<- B 6402 ubfx r9, rINST, #8, #4 @ r9<- A 6403 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6404 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6405 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6406 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6407 .if 0 6408 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
6409 beq common_errDivideByZero 6410 .endif 6411 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6412 6413 @ optional op; may set condition codes 6414 bl fmod @ result<- op, r0-r3 changed 6415 GET_INST_OPCODE(ip) @ extract opcode from rINST 6416 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6417 GOTO_OPCODE(ip) @ jump to next instruction 6418 /* 12-15 instructions */ 6419 6420 6421 6422/* ------------------------------ */ 6423 .balign 64 6424.L_OP_ADD_INT_LIT16: /* 0xd0 */ 6425/* File: armv6t2/OP_ADD_INT_LIT16.S */ 6426/* File: armv6t2/binopLit16.S */ 6427 /* 6428 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6429 * that specifies an instruction that performs "result = r0 op r1". 6430 * This could be an ARM instruction or a function call. (If the result 6431 * comes back in a register other than r0, you can override "result".) 6432 * 6433 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6434 * vCC (r1). Useful for integer division and modulus. 6435 * 6436 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6437 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6438 */ 6439 /* binop/lit16 vA, vB, #+CCCC */ 6440 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6441 mov r2, rINST, lsr #12 @ r2<- B 6442 ubfx r9, rINST, #8, #4 @ r9<- A 6443 GET_VREG(r0, r2) @ r0<- vB 6444 .if 0 6445 cmp r1, #0 @ is second operand zero? 
6446 beq common_errDivideByZero 6447 .endif 6448 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6449 6450 add r0, r0, r1 @ r0<- op, r0-r3 changed 6451 GET_INST_OPCODE(ip) @ extract opcode from rINST 6452 SET_VREG(r0, r9) @ vAA<- r0 6453 GOTO_OPCODE(ip) @ jump to next instruction 6454 /* 10-13 instructions */ 6455 6456 6457 6458/* ------------------------------ */ 6459 .balign 64 6460.L_OP_RSUB_INT: /* 0xd1 */ 6461/* File: armv6t2/OP_RSUB_INT.S */ 6462/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */ 6463/* File: armv6t2/binopLit16.S */ 6464 /* 6465 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6466 * that specifies an instruction that performs "result = r0 op r1". 6467 * This could be an ARM instruction or a function call. (If the result 6468 * comes back in a register other than r0, you can override "result".) 6469 * 6470 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6471 * vCC (r1). Useful for integer division and modulus. 6472 * 6473 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6474 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6475 */ 6476 /* binop/lit16 vA, vB, #+CCCC */ 6477 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6478 mov r2, rINST, lsr #12 @ r2<- B 6479 ubfx r9, rINST, #8, #4 @ r9<- A 6480 GET_VREG(r0, r2) @ r0<- vB 6481 .if 0 6482 cmp r1, #0 @ is second operand zero? 
6483 beq common_errDivideByZero 6484 .endif 6485 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6486 6487 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6488 GET_INST_OPCODE(ip) @ extract opcode from rINST 6489 SET_VREG(r0, r9) @ vAA<- r0 6490 GOTO_OPCODE(ip) @ jump to next instruction 6491 /* 10-13 instructions */ 6492 6493 6494 6495/* ------------------------------ */ 6496 .balign 64 6497.L_OP_MUL_INT_LIT16: /* 0xd2 */ 6498/* File: armv6t2/OP_MUL_INT_LIT16.S */ 6499/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6500/* File: armv6t2/binopLit16.S */ 6501 /* 6502 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6503 * that specifies an instruction that performs "result = r0 op r1". 6504 * This could be an ARM instruction or a function call. (If the result 6505 * comes back in a register other than r0, you can override "result".) 6506 * 6507 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6508 * vCC (r1). Useful for integer division and modulus. 6509 * 6510 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6511 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6512 */ 6513 /* binop/lit16 vA, vB, #+CCCC */ 6514 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6515 mov r2, rINST, lsr #12 @ r2<- B 6516 ubfx r9, rINST, #8, #4 @ r9<- A 6517 GET_VREG(r0, r2) @ r0<- vB 6518 .if 0 6519 cmp r1, #0 @ is second operand zero? 6520 beq common_errDivideByZero 6521 .endif 6522 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6523 6524 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6525 GET_INST_OPCODE(ip) @ extract opcode from rINST 6526 SET_VREG(r0, r9) @ vAA<- r0 6527 GOTO_OPCODE(ip) @ jump to next instruction 6528 /* 10-13 instructions */ 6529 6530 6531 6532/* ------------------------------ */ 6533 .balign 64 6534.L_OP_DIV_INT_LIT16: /* 0xd3 */ 6535/* File: armv6t2/OP_DIV_INT_LIT16.S */ 6536/* File: armv6t2/binopLit16.S */ 6537 /* 6538 * Generic 32-bit "lit16" binary operation. 
Provide an "instr" line 6539 * that specifies an instruction that performs "result = r0 op r1". 6540 * This could be an ARM instruction or a function call. (If the result 6541 * comes back in a register other than r0, you can override "result".) 6542 * 6543 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6544 * vCC (r1). Useful for integer division and modulus. 6545 * 6546 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6547 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6548 */ 6549 /* binop/lit16 vA, vB, #+CCCC */ 6550 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6551 mov r2, rINST, lsr #12 @ r2<- B 6552 ubfx r9, rINST, #8, #4 @ r9<- A 6553 GET_VREG(r0, r2) @ r0<- vB 6554 .if 1 6555 cmp r1, #0 @ is second operand zero? 6556 beq common_errDivideByZero 6557 .endif 6558 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6559 6560 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6561 GET_INST_OPCODE(ip) @ extract opcode from rINST 6562 SET_VREG(r0, r9) @ vAA<- r0 6563 GOTO_OPCODE(ip) @ jump to next instruction 6564 /* 10-13 instructions */ 6565 6566 6567 6568/* ------------------------------ */ 6569 .balign 64 6570.L_OP_REM_INT_LIT16: /* 0xd4 */ 6571/* File: armv6t2/OP_REM_INT_LIT16.S */ 6572/* idivmod returns quotient in r0 and remainder in r1 */ 6573/* File: armv6t2/binopLit16.S */ 6574 /* 6575 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6576 * that specifies an instruction that performs "result = r0 op r1". 6577 * This could be an ARM instruction or a function call. (If the result 6578 * comes back in a register other than r0, you can override "result".) 6579 * 6580 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6581 * vCC (r1). Useful for integer division and modulus. 
6582 * 6583 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6584 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6585 */ 6586 /* binop/lit16 vA, vB, #+CCCC */ 6587 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6588 mov r2, rINST, lsr #12 @ r2<- B 6589 ubfx r9, rINST, #8, #4 @ r9<- A 6590 GET_VREG(r0, r2) @ r0<- vB 6591 .if 1 6592 cmp r1, #0 @ is second operand zero? 6593 beq common_errDivideByZero 6594 .endif 6595 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6596 6597 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6598 GET_INST_OPCODE(ip) @ extract opcode from rINST 6599 SET_VREG(r1, r9) @ vAA<- r1 6600 GOTO_OPCODE(ip) @ jump to next instruction 6601 /* 10-13 instructions */ 6602 6603 6604 6605/* ------------------------------ */ 6606 .balign 64 6607.L_OP_AND_INT_LIT16: /* 0xd5 */ 6608/* File: armv6t2/OP_AND_INT_LIT16.S */ 6609/* File: armv6t2/binopLit16.S */ 6610 /* 6611 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6612 * that specifies an instruction that performs "result = r0 op r1". 6613 * This could be an ARM instruction or a function call. (If the result 6614 * comes back in a register other than r0, you can override "result".) 6615 * 6616 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6617 * vCC (r1). Useful for integer division and modulus. 6618 * 6619 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6620 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6621 */ 6622 /* binop/lit16 vA, vB, #+CCCC */ 6623 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6624 mov r2, rINST, lsr #12 @ r2<- B 6625 ubfx r9, rINST, #8, #4 @ r9<- A 6626 GET_VREG(r0, r2) @ r0<- vB 6627 .if 0 6628 cmp r1, #0 @ is second operand zero? 
6629 beq common_errDivideByZero 6630 .endif 6631 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6632 6633 and r0, r0, r1 @ r0<- op, r0-r3 changed 6634 GET_INST_OPCODE(ip) @ extract opcode from rINST 6635 SET_VREG(r0, r9) @ vAA<- r0 6636 GOTO_OPCODE(ip) @ jump to next instruction 6637 /* 10-13 instructions */ 6638 6639 6640 6641/* ------------------------------ */ 6642 .balign 64 6643.L_OP_OR_INT_LIT16: /* 0xd6 */ 6644/* File: armv6t2/OP_OR_INT_LIT16.S */ 6645/* File: armv6t2/binopLit16.S */ 6646 /* 6647 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6648 * that specifies an instruction that performs "result = r0 op r1". 6649 * This could be an ARM instruction or a function call. (If the result 6650 * comes back in a register other than r0, you can override "result".) 6651 * 6652 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6653 * vCC (r1). Useful for integer division and modulus. 6654 * 6655 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6656 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6657 */ 6658 /* binop/lit16 vA, vB, #+CCCC */ 6659 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6660 mov r2, rINST, lsr #12 @ r2<- B 6661 ubfx r9, rINST, #8, #4 @ r9<- A 6662 GET_VREG(r0, r2) @ r0<- vB 6663 .if 0 6664 cmp r1, #0 @ is second operand zero? 6665 beq common_errDivideByZero 6666 .endif 6667 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6668 6669 orr r0, r0, r1 @ r0<- op, r0-r3 changed 6670 GET_INST_OPCODE(ip) @ extract opcode from rINST 6671 SET_VREG(r0, r9) @ vAA<- r0 6672 GOTO_OPCODE(ip) @ jump to next instruction 6673 /* 10-13 instructions */ 6674 6675 6676 6677/* ------------------------------ */ 6678 .balign 64 6679.L_OP_XOR_INT_LIT16: /* 0xd7 */ 6680/* File: armv6t2/OP_XOR_INT_LIT16.S */ 6681/* File: armv6t2/binopLit16.S */ 6682 /* 6683 * Generic 32-bit "lit16" binary operation. Provide an "instr" line 6684 * that specifies an instruction that performs "result = r0 op r1". 
6685 * This could be an ARM instruction or a function call. (If the result 6686 * comes back in a register other than r0, you can override "result".) 6687 * 6688 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6689 * vCC (r1). Useful for integer division and modulus. 6690 * 6691 * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, 6692 * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 6693 */ 6694 /* binop/lit16 vA, vB, #+CCCC */ 6695 FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended) 6696 mov r2, rINST, lsr #12 @ r2<- B 6697 ubfx r9, rINST, #8, #4 @ r9<- A 6698 GET_VREG(r0, r2) @ r0<- vB 6699 .if 0 6700 cmp r1, #0 @ is second operand zero? 6701 beq common_errDivideByZero 6702 .endif 6703 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6704 6705 eor r0, r0, r1 @ r0<- op, r0-r3 changed 6706 GET_INST_OPCODE(ip) @ extract opcode from rINST 6707 SET_VREG(r0, r9) @ vAA<- r0 6708 GOTO_OPCODE(ip) @ jump to next instruction 6709 /* 10-13 instructions */ 6710 6711 6712 6713/* ------------------------------ */ 6714 .balign 64 6715.L_OP_ADD_INT_LIT8: /* 0xd8 */ 6716/* File: armv5te/OP_ADD_INT_LIT8.S */ 6717/* File: armv5te/binopLit8.S */ 6718 /* 6719 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6720 * that specifies an instruction that performs "result = r0 op r1". 6721 * This could be an ARM instruction or a function call. (If the result 6722 * comes back in a register other than r0, you can override "result".) 6723 * 6724 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6725 * vCC (r1). Useful for integer division and modulus. 
6726 * 6727 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6728 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6729 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6730 */ 6731 /* binop/lit8 vAA, vBB, #+CC */ 6732 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6733 mov r9, rINST, lsr #8 @ r9<- AA 6734 and r2, r3, #255 @ r2<- BB 6735 GET_VREG(r0, r2) @ r0<- vBB 6736 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6737 .if 0 6738 @cmp r1, #0 @ is second operand zero? 6739 beq common_errDivideByZero 6740 .endif 6741 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6742 6743 @ optional op; may set condition codes 6744 add r0, r0, r1 @ r0<- op, r0-r3 changed 6745 GET_INST_OPCODE(ip) @ extract opcode from rINST 6746 SET_VREG(r0, r9) @ vAA<- r0 6747 GOTO_OPCODE(ip) @ jump to next instruction 6748 /* 10-12 instructions */ 6749 6750 6751 6752/* ------------------------------ */ 6753 .balign 64 6754.L_OP_RSUB_INT_LIT8: /* 0xd9 */ 6755/* File: armv5te/OP_RSUB_INT_LIT8.S */ 6756/* File: armv5te/binopLit8.S */ 6757 /* 6758 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6759 * that specifies an instruction that performs "result = r0 op r1". 6760 * This could be an ARM instruction or a function call. (If the result 6761 * comes back in a register other than r0, you can override "result".) 6762 * 6763 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6764 * vCC (r1). Useful for integer division and modulus. 6765 * 6766 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6767 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6768 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6769 */ 6770 /* binop/lit8 vAA, vBB, #+CC */ 6771 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6772 mov r9, rINST, lsr #8 @ r9<- AA 6773 and r2, r3, #255 @ r2<- BB 6774 GET_VREG(r0, r2) @ r0<- vBB 6775 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6776 .if 0 6777 @cmp r1, #0 @ is second operand zero? 
6778 beq common_errDivideByZero 6779 .endif 6780 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6781 6782 @ optional op; may set condition codes 6783 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 6784 GET_INST_OPCODE(ip) @ extract opcode from rINST 6785 SET_VREG(r0, r9) @ vAA<- r0 6786 GOTO_OPCODE(ip) @ jump to next instruction 6787 /* 10-12 instructions */ 6788 6789 6790 6791/* ------------------------------ */ 6792 .balign 64 6793.L_OP_MUL_INT_LIT8: /* 0xda */ 6794/* File: armv5te/OP_MUL_INT_LIT8.S */ 6795/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 6796/* File: armv5te/binopLit8.S */ 6797 /* 6798 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6799 * that specifies an instruction that performs "result = r0 op r1". 6800 * This could be an ARM instruction or a function call. (If the result 6801 * comes back in a register other than r0, you can override "result".) 6802 * 6803 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6804 * vCC (r1). Useful for integer division and modulus. 6805 * 6806 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6807 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6808 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6809 */ 6810 /* binop/lit8 vAA, vBB, #+CC */ 6811 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6812 mov r9, rINST, lsr #8 @ r9<- AA 6813 and r2, r3, #255 @ r2<- BB 6814 GET_VREG(r0, r2) @ r0<- vBB 6815 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6816 .if 0 6817 @cmp r1, #0 @ is second operand zero? 
6818 beq common_errDivideByZero 6819 .endif 6820 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6821 6822 @ optional op; may set condition codes 6823 mul r0, r1, r0 @ r0<- op, r0-r3 changed 6824 GET_INST_OPCODE(ip) @ extract opcode from rINST 6825 SET_VREG(r0, r9) @ vAA<- r0 6826 GOTO_OPCODE(ip) @ jump to next instruction 6827 /* 10-12 instructions */ 6828 6829 6830 6831/* ------------------------------ */ 6832 .balign 64 6833.L_OP_DIV_INT_LIT8: /* 0xdb */ 6834/* File: armv5te/OP_DIV_INT_LIT8.S */ 6835/* File: armv5te/binopLit8.S */ 6836 /* 6837 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6838 * that specifies an instruction that performs "result = r0 op r1". 6839 * This could be an ARM instruction or a function call. (If the result 6840 * comes back in a register other than r0, you can override "result".) 6841 * 6842 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6843 * vCC (r1). Useful for integer division and modulus. 6844 * 6845 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6846 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6847 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6848 */ 6849 /* binop/lit8 vAA, vBB, #+CC */ 6850 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6851 mov r9, rINST, lsr #8 @ r9<- AA 6852 and r2, r3, #255 @ r2<- BB 6853 GET_VREG(r0, r2) @ r0<- vBB 6854 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6855 .if 1 6856 @cmp r1, #0 @ is second operand zero? 
6857 beq common_errDivideByZero 6858 .endif 6859 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6860 6861 @ optional op; may set condition codes 6862 bl __aeabi_idiv @ r0<- op, r0-r3 changed 6863 GET_INST_OPCODE(ip) @ extract opcode from rINST 6864 SET_VREG(r0, r9) @ vAA<- r0 6865 GOTO_OPCODE(ip) @ jump to next instruction 6866 /* 10-12 instructions */ 6867 6868 6869 6870/* ------------------------------ */ 6871 .balign 64 6872.L_OP_REM_INT_LIT8: /* 0xdc */ 6873/* File: armv5te/OP_REM_INT_LIT8.S */ 6874/* idivmod returns quotient in r0 and remainder in r1 */ 6875/* File: armv5te/binopLit8.S */ 6876 /* 6877 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6878 * that specifies an instruction that performs "result = r0 op r1". 6879 * This could be an ARM instruction or a function call. (If the result 6880 * comes back in a register other than r0, you can override "result".) 6881 * 6882 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6883 * vCC (r1). Useful for integer division and modulus. 6884 * 6885 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6886 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6887 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6888 */ 6889 /* binop/lit8 vAA, vBB, #+CC */ 6890 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6891 mov r9, rINST, lsr #8 @ r9<- AA 6892 and r2, r3, #255 @ r2<- BB 6893 GET_VREG(r0, r2) @ r0<- vBB 6894 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6895 .if 1 6896 @cmp r1, #0 @ is second operand zero? 
6897 beq common_errDivideByZero 6898 .endif 6899 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6900 6901 @ optional op; may set condition codes 6902 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 6903 GET_INST_OPCODE(ip) @ extract opcode from rINST 6904 SET_VREG(r1, r9) @ vAA<- r1 6905 GOTO_OPCODE(ip) @ jump to next instruction 6906 /* 10-12 instructions */ 6907 6908 6909 6910/* ------------------------------ */ 6911 .balign 64 6912.L_OP_AND_INT_LIT8: /* 0xdd */ 6913/* File: armv5te/OP_AND_INT_LIT8.S */ 6914/* File: armv5te/binopLit8.S */ 6915 /* 6916 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6917 * that specifies an instruction that performs "result = r0 op r1". 6918 * This could be an ARM instruction or a function call. (If the result 6919 * comes back in a register other than r0, you can override "result".) 6920 * 6921 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6922 * vCC (r1). Useful for integer division and modulus. 6923 * 6924 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6925 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6926 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6927 */ 6928 /* binop/lit8 vAA, vBB, #+CC */ 6929 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6930 mov r9, rINST, lsr #8 @ r9<- AA 6931 and r2, r3, #255 @ r2<- BB 6932 GET_VREG(r0, r2) @ r0<- vBB 6933 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6934 .if 0 6935 @cmp r1, #0 @ is second operand zero? 
6936 beq common_errDivideByZero 6937 .endif 6938 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6939 6940 @ optional op; may set condition codes 6941 and r0, r0, r1 @ r0<- op, r0-r3 changed 6942 GET_INST_OPCODE(ip) @ extract opcode from rINST 6943 SET_VREG(r0, r9) @ vAA<- r0 6944 GOTO_OPCODE(ip) @ jump to next instruction 6945 /* 10-12 instructions */ 6946 6947 6948 6949/* ------------------------------ */ 6950 .balign 64 6951.L_OP_OR_INT_LIT8: /* 0xde */ 6952/* File: armv5te/OP_OR_INT_LIT8.S */ 6953/* File: armv5te/binopLit8.S */ 6954 /* 6955 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6956 * that specifies an instruction that performs "result = r0 op r1". 6957 * This could be an ARM instruction or a function call. (If the result 6958 * comes back in a register other than r0, you can override "result".) 6959 * 6960 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6961 * vCC (r1). Useful for integer division and modulus. 6962 * 6963 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 6964 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 6965 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 6966 */ 6967 /* binop/lit8 vAA, vBB, #+CC */ 6968 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 6969 mov r9, rINST, lsr #8 @ r9<- AA 6970 and r2, r3, #255 @ r2<- BB 6971 GET_VREG(r0, r2) @ r0<- vBB 6972 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 6973 .if 0 6974 @cmp r1, #0 @ is second operand zero? 
6975 beq common_errDivideByZero 6976 .endif 6977 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 6978 6979 @ optional op; may set condition codes 6980 orr r0, r0, r1 @ r0<- op, r0-r3 changed 6981 GET_INST_OPCODE(ip) @ extract opcode from rINST 6982 SET_VREG(r0, r9) @ vAA<- r0 6983 GOTO_OPCODE(ip) @ jump to next instruction 6984 /* 10-12 instructions */ 6985 6986 6987 6988/* ------------------------------ */ 6989 .balign 64 6990.L_OP_XOR_INT_LIT8: /* 0xdf */ 6991/* File: armv5te/OP_XOR_INT_LIT8.S */ 6992/* File: armv5te/binopLit8.S */ 6993 /* 6994 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 6995 * that specifies an instruction that performs "result = r0 op r1". 6996 * This could be an ARM instruction or a function call. (If the result 6997 * comes back in a register other than r0, you can override "result".) 6998 * 6999 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7000 * vCC (r1). Useful for integer division and modulus. 7001 * 7002 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7003 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7004 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7005 */ 7006 /* binop/lit8 vAA, vBB, #+CC */ 7007 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7008 mov r9, rINST, lsr #8 @ r9<- AA 7009 and r2, r3, #255 @ r2<- BB 7010 GET_VREG(r0, r2) @ r0<- vBB 7011 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7012 .if 0 7013 @cmp r1, #0 @ is second operand zero? 
7014 beq common_errDivideByZero 7015 .endif 7016 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7017 7018 @ optional op; may set condition codes 7019 eor r0, r0, r1 @ r0<- op, r0-r3 changed 7020 GET_INST_OPCODE(ip) @ extract opcode from rINST 7021 SET_VREG(r0, r9) @ vAA<- r0 7022 GOTO_OPCODE(ip) @ jump to next instruction 7023 /* 10-12 instructions */ 7024 7025 7026 7027/* ------------------------------ */ 7028 .balign 64 7029.L_OP_SHL_INT_LIT8: /* 0xe0 */ 7030/* File: armv5te/OP_SHL_INT_LIT8.S */ 7031/* File: armv5te/binopLit8.S */ 7032 /* 7033 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7034 * that specifies an instruction that performs "result = r0 op r1". 7035 * This could be an ARM instruction or a function call. (If the result 7036 * comes back in a register other than r0, you can override "result".) 7037 * 7038 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7039 * vCC (r1). Useful for integer division and modulus. 7040 * 7041 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7042 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7043 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7044 */ 7045 /* binop/lit8 vAA, vBB, #+CC */ 7046 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7047 mov r9, rINST, lsr #8 @ r9<- AA 7048 and r2, r3, #255 @ r2<- BB 7049 GET_VREG(r0, r2) @ r0<- vBB 7050 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7051 .if 0 7052 @cmp r1, #0 @ is second operand zero? 
7053 beq common_errDivideByZero 7054 .endif 7055 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7056 7057 and r1, r1, #31 @ optional op; may set condition codes 7058 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 7059 GET_INST_OPCODE(ip) @ extract opcode from rINST 7060 SET_VREG(r0, r9) @ vAA<- r0 7061 GOTO_OPCODE(ip) @ jump to next instruction 7062 /* 10-12 instructions */ 7063 7064 7065 7066/* ------------------------------ */ 7067 .balign 64 7068.L_OP_SHR_INT_LIT8: /* 0xe1 */ 7069/* File: armv5te/OP_SHR_INT_LIT8.S */ 7070/* File: armv5te/binopLit8.S */ 7071 /* 7072 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7073 * that specifies an instruction that performs "result = r0 op r1". 7074 * This could be an ARM instruction or a function call. (If the result 7075 * comes back in a register other than r0, you can override "result".) 7076 * 7077 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7078 * vCC (r1). Useful for integer division and modulus. 7079 * 7080 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7081 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7082 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7083 */ 7084 /* binop/lit8 vAA, vBB, #+CC */ 7085 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7086 mov r9, rINST, lsr #8 @ r9<- AA 7087 and r2, r3, #255 @ r2<- BB 7088 GET_VREG(r0, r2) @ r0<- vBB 7089 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7090 .if 0 7091 @cmp r1, #0 @ is second operand zero? 
7092 beq common_errDivideByZero 7093 .endif 7094 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7095 7096 and r1, r1, #31 @ optional op; may set condition codes 7097 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 7098 GET_INST_OPCODE(ip) @ extract opcode from rINST 7099 SET_VREG(r0, r9) @ vAA<- r0 7100 GOTO_OPCODE(ip) @ jump to next instruction 7101 /* 10-12 instructions */ 7102 7103 7104 7105/* ------------------------------ */ 7106 .balign 64 7107.L_OP_USHR_INT_LIT8: /* 0xe2 */ 7108/* File: armv5te/OP_USHR_INT_LIT8.S */ 7109/* File: armv5te/binopLit8.S */ 7110 /* 7111 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7112 * that specifies an instruction that performs "result = r0 op r1". 7113 * This could be an ARM instruction or a function call. (If the result 7114 * comes back in a register other than r0, you can override "result".) 7115 * 7116 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7117 * vCC (r1). Useful for integer division and modulus. 7118 * 7119 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7120 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7121 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7122 */ 7123 /* binop/lit8 vAA, vBB, #+CC */ 7124 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7125 mov r9, rINST, lsr #8 @ r9<- AA 7126 and r2, r3, #255 @ r2<- BB 7127 GET_VREG(r0, r2) @ r0<- vBB 7128 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7129 .if 0 7130 @cmp r1, #0 @ is second operand zero? 
7131 beq common_errDivideByZero 7132 .endif 7133 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7134 7135 and r1, r1, #31 @ optional op; may set condition codes 7136 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 7137 GET_INST_OPCODE(ip) @ extract opcode from rINST 7138 SET_VREG(r0, r9) @ vAA<- r0 7139 GOTO_OPCODE(ip) @ jump to next instruction 7140 /* 10-12 instructions */ 7141 7142 7143 7144/* ------------------------------ */ 7145 .balign 64 7146.L_OP_UNUSED_E3: /* 0xe3 */ 7147/* File: armv5te/OP_UNUSED_E3.S */ 7148/* File: armv5te/unused.S */ 7149 bl common_abort 7150 7151 7152 7153/* ------------------------------ */ 7154 .balign 64 7155.L_OP_UNUSED_E4: /* 0xe4 */ 7156/* File: armv5te/OP_UNUSED_E4.S */ 7157/* File: armv5te/unused.S */ 7158 bl common_abort 7159 7160 7161 7162/* ------------------------------ */ 7163 .balign 64 7164.L_OP_UNUSED_E5: /* 0xe5 */ 7165/* File: armv5te/OP_UNUSED_E5.S */ 7166/* File: armv5te/unused.S */ 7167 bl common_abort 7168 7169 7170 7171/* ------------------------------ */ 7172 .balign 64 7173.L_OP_UNUSED_E6: /* 0xe6 */ 7174/* File: armv5te/OP_UNUSED_E6.S */ 7175/* File: armv5te/unused.S */ 7176 bl common_abort 7177 7178 7179 7180/* ------------------------------ */ 7181 .balign 64 7182.L_OP_UNUSED_E7: /* 0xe7 */ 7183/* File: armv5te/OP_UNUSED_E7.S */ 7184/* File: armv5te/unused.S */ 7185 bl common_abort 7186 7187 7188 7189/* ------------------------------ */ 7190 .balign 64 7191.L_OP_UNUSED_E8: /* 0xe8 */ 7192/* File: armv5te/OP_UNUSED_E8.S */ 7193/* File: armv5te/unused.S */ 7194 bl common_abort 7195 7196 7197 7198/* ------------------------------ */ 7199 .balign 64 7200.L_OP_UNUSED_E9: /* 0xe9 */ 7201/* File: armv5te/OP_UNUSED_E9.S */ 7202/* File: armv5te/unused.S */ 7203 bl common_abort 7204 7205 7206 7207/* ------------------------------ */ 7208 .balign 64 7209.L_OP_UNUSED_EA: /* 0xea */ 7210/* File: armv5te/OP_UNUSED_EA.S */ 7211/* File: armv5te/unused.S */ 7212 bl common_abort 7213 7214 7215 7216/* 
 ------------------------------ */
    .balign 64
.L_OP_UNUSED_EB: /* 0xeb */
/* File: armv5te/OP_UNUSED_EB.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode: should never be reached


/* ------------------------------ */
    .balign 64
.L_OP_BREAKPOINT: /* 0xec */
/* File: armv5te/OP_BREAKPOINT.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ breakpoint not handled by this interpreter


/* ------------------------------ */
    .balign 64
.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */
    /*
     * Handle a throw-verification-error instruction.  This throws an
     * exception for an error discovered during verification.  The
     * exception is indicated by AA, with some detail provided by BBBB.
     */
    /* op AA, ref@BBBB */
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    FETCH(r2, 1)                        @ r2<- BBBB
    EXPORT_PC()                         @ export the PC
    mov     r1, rINST, lsr #8           @ r1<- AA
    bl      dvmThrowVerificationError   @ always throws
    b       common_exceptionThrown      @ handle exception


/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE: /* 0xee */
/* File: armv5te/OP_EXECUTE_INLINE.S */
    /*
     * Execute a "native inline" instruction.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
     */
    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
    FETCH(r10, 1)                       @ r10<- BBBB
    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
    EXPORT_PC()                         @ can throw
    sub     sp, sp, #8                  @ make room for arg, +64 bit align
    mov     r0, rINST, lsr #12          @ r0<- B
    str     r1, [sp]                    @ push &glue->retval
    bl      .LOP_EXECUTE_INLINE_continue    @ make call; will return after
    add     sp, sp, #8                  @ pop stack
    cmp     r0, #0                      @ test boolean result of inline
    beq     common_exceptionThrown      @ returned false, handle exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */
    /*
     * Execute a "native inline" instruction, using "/range" semantics.
     * Same idea as execute-inline, but we get the args differently.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
     */
    /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
    FETCH(r10, 1)                       @ r10<- BBBB
    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
    EXPORT_PC()                         @ can throw
    sub     sp, sp, #8                  @ make room for arg, +64 bit align
    mov     r0, rINST, lsr #8           @ r0<- AA
    str     r1, [sp]                    @ push &glue->retval
    bl      .LOP_EXECUTE_INLINE_RANGE_continue  @ make call; will return after
    add     sp, sp, #8                  @ pop stack
    cmp     r0, #0                      @ test boolean result of inline
    beq     common_exceptionThrown      @ returned false, handle exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
    /*
     * invoke-direct-empty is a no-op in a "standard" interpreter.
     */
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_F1: /* 0xf1 */
/* File: armv5te/OP_UNUSED_F1.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode: should never be reached


/* ------------------------------ */
    .balign 64
.L_OP_IGET_QUICK: /* 0xf2 */
/* File: armv6t2/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    FETCH(r1, 1)                        @ r1<- field byte offset
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    ubfx    r2, rINST, #8, #4           @ r2<- A
    cmp     r3, #0                      @ check object for null
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
/* File: armv6t2/OP_IGET_WIDE_QUICK.S */
    /* iget-wide-quick vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    FETCH(r1, 1)                        @ r1<- field byte offset
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    ubfx    r2, rINST, #8, #4           @ r2<- A
    cmp     r3, #0                      @ check object for null
    beq     common_errNullObject        @ object was null
    ldrd    r0, [r3, r1]                @ r0<- obj.field (64 bits, aligned)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
/* File: armv5te/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A (mask out high nibble)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_QUICK: /* 0xf5 */
/* File: armv6t2/OP_IPUT_QUICK.S */
    /* For: iput-quick, iput-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    FETCH(r1, 1)                        @ r1<- field byte offset
    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
    ubfx    r2, rINST, #8, #4           @ r2<- A
    cmp     r3, #0                      @ check object for null
    beq     common_errNullObject        @ object was null
    GET_VREG(r0, r2)                    @ r0<- fp[A]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
/* File: armv6t2/OP_IPUT_WIDE_QUICK.S */
    /* iput-wide-quick vA, vB, offset@CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r2, r1)                    @ r2<- fp[B], the object pointer
    add     r3, rFP, r0, lsl #2         @ r3<- &fp[A]
    cmp     r2, #0                      @ check object for null
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH(r3, 1)                        @ r3<- field byte offset
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    strd    r0, [r2, r3]                @ obj.field (64 bits, aligned)<- r0/r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */
/* File: armv5te/OP_IPUT_QUICK.S */
    /* For: iput-quick, iput-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    and     r2, r2, #15                 @ r2<- A (mask out high nibble)
    GET_VREG(r0, r2)                    @ r0<- fp[A]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    str     r0, [r3, r1]                @ obj.field (always 32 bits)<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
/* File: 
armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!0)                        @ non-range form: extract low nibble
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB]
    bl      common_invokeMethodNoRange  @ continue on

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!1)                        @ range form: keep full CCCC
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB]
    bl      common_invokeMethodRange    @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
     *
     * for: [opt] invoke-super-quick, invoke-super-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    .if     (!0)                        @ non-range form: extract low nibble
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    ldr     r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
    GET_VREG(r3, r10)                   @ r3<- "this"
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
    cmp     r3, #0                      @ null "this" ref?
    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
    beq     common_errNullObject        @ "this" is null, throw exception
    bl      common_invokeMethodNoRange  @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
     *
     * for: [opt] invoke-super-quick, invoke-super-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    .if     (!1)                        @ range form: keep full CCCC
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    ldr     r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
    GET_VREG(r3, r10)                   @ r3<- "this"
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
    cmp     r3, #0                      @ null "this" ref?
    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
    beq     common_errNullObject        @ "this" is null, throw exception
    bl      common_invokeMethodRange    @ continue on


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FC: /* 0xfc */
/* File: armv5te/OP_UNUSED_FC.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode: should never be reached


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FD: /* 0xfd */
/* File: armv5te/OP_UNUSED_FD.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode: should never be reached


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FE: /* 0xfe */
/* File: armv5te/OP_UNUSED_FE.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode: should never be reached


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_FF: /* 0xff */
/* File: armv5te/OP_UNUSED_FF.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode: should never be reached



    .balign 64
    .size   dvmAsmInstructionStart, .-dvmAsmInstructionStart
    .global dvmAsmInstructionEnd
dvmAsmInstructionEnd:

/*
 * ===========================================================================
 *  Sister implementations
 * 
 ===========================================================================
 */
    .global dvmAsmSisterStart
    .type   dvmAsmSisterStart, %function
    .text
    .balign 4
dvmAsmSisterStart:

/* continuation for OP_CONST_STRING */

    /*
     * Continuation if the String has not yet been resolved.
     *  r1: BBBB (String ref)
     *  r9: target register
     */
.LOP_CONST_STRING_resolve:
    EXPORT_PC()
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveString            @ r0<- String reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CONST_STRING_JUMBO */

    /*
     * Continuation if the String has not yet been resolved.
     *  r1: BBBBBBBB (String ref)
     *  r9: target register
     */
.LOP_CONST_STRING_JUMBO_resolve:
    EXPORT_PC()
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveString            @ r0<- String reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CONST_CLASS */

    /*
     * Continuation if the Class has not yet been resolved.
     *  r1: BBBB (Class ref)
     *  r9: target register
     */
.LOP_CONST_CLASS_resolve:
    EXPORT_PC()
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    mov     r2, #1                      @ r2<- true
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- Class reference
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yup, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CHECK_CAST */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    cmp     r0, #0                      @ failed?
    bne     .LOP_CHECK_CAST_okay        @ no, success

    @ A cast has failed.  We need to throw a ClassCastException with the
    @ class of the object that failed to be cast.
    EXPORT_PC()                         @ about to throw
    ldr     r3, [r9, #offObject_clazz]  @ r3<- obj->clazz
    ldr     r0, .LstrClassCastExceptionPtr
    ldr     r1, [r3, #offClassObject_descriptor]    @ r1<- obj->clazz->descriptor
    bl      dvmThrowExceptionWithClassMessage
    b       common_exceptionThrown

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r2 holds BBBB
     *  r9 holds object
     */
.LOP_CHECK_CAST_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r1, r2                      @ r1<- BBBB
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBBB
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_CHECK_CAST_resolved    @ pick up where we left off

.LstrClassCastExceptionPtr:
    .word   .LstrClassCastException


/* continuation for OP_INSTANCE_OF */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     *  r0 holds obj->clazz
     *  r1 holds class resolved from BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    @ fall through to OP_INSTANCE_OF_store

    /*
     *  r0 holds boolean result
     *  r9 holds A
     */
.LOP_INSTANCE_OF_store:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Trivial test succeeded, save and bail.
     *  r9 holds A
     */
.LOP_INSTANCE_OF_trivial:
    mov     r0, #1                      @ indicate success
    @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r3 holds BBBB
     *  r9 holds A
     */
.LOP_INSTANCE_OF_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    mov     r1, r3                      @ r1<- BBBB
    mov     r2, #1                      @ r2<- true
    ldr     r0, [r0, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBBB
    mov     r3, rINST, lsr #12          @ r3<- B
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_INSTANCE_OF_resolved   @ pick up where we left off


/* continuation for OP_NEW_INSTANCE */

    .balign 32                          @ minimize cache lines
.LOP_NEW_INSTANCE_finish: @ r0=new object
    mov     r3, rINST, lsr #8           @ r3<- AA
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Class initialization required.
     *
     *  r0 holds class object
     */
.LOP_NEW_INSTANCE_needinit:
    mov     r9, r0                      @ save r0
    bl      dvmInitClass                @ initialize class
    cmp     r0, #0                      @ check boolean result
    mov     r0, r9                      @ restore r0
    bne     .LOP_NEW_INSTANCE_initialized   @ success, continue
    b       common_exceptionThrown      @ failed, deal with init exception

    /*
     * Resolution required.  This is the least-likely path.
     *
     *  r1 holds BBBB
     */
.LOP_NEW_INSTANCE_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    bne     .LOP_NEW_INSTANCE_resolved  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

.LstrInstantiationErrorPtr:
    .word   .LstrInstantiationError


/* continuation for OP_NEW_ARRAY */


    /*
     * Resolve class.  (This is an uncommon case.)
     *
     *  r1 holds array length
     *  r2 holds class ref CCCC
     */
.LOP_NEW_ARRAY_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r9, r1                      @ r9<- length (save)
    mov     r1, r2                      @ r1<- CCCC
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    mov     r1, r9                      @ r1<- length (restore)
    beq     common_exceptionThrown      @ yes, handle exception
    @ fall through to OP_NEW_ARRAY_finish

    /*
     * Finish allocation.
     *
     *  r0 holds class
     *  r1 holds array length
     */
.LOP_NEW_ARRAY_finish:
    mov     r2, #ALLOC_DONT_TRACK       @ don't track in local refs table
    bl      dvmAllocArrayByClass        @ r0<- call(clazz, length, flags)
    cmp     r0, #0                      @ failed?
    mov     r2, rINST, lsr #8           @ r2<- A+
    beq     common_exceptionThrown      @ yes, handle the exception
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_FILLED_NEW_ARRAY */

    /*
     * On entry:
     *  r0 holds array class
     *  r10 holds AA or BA
     */
.LOP_FILLED_NEW_ARRAY_continue:
    ldr     r3, [r0, #offClassObject_descriptor]    @ r3<- arrayClass->descriptor
    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
    ldrb    r3, [r3, #1]                @ r3<- descriptor[1]
    .if     0                           @ non-range form: length is B nibble
    mov     r1, r10                     @ r1<- AA (length)
    .else
    mov     r1, r10, lsr #4             @ r1<- B (length)
    .endif
    cmp     r3, #'I'                    @ array of ints?
    cmpne   r3, #'L'                    @ array of objects?
    cmpne   r3, #'['                    @ array of arrays?
    mov     r9, r1                      @ save length in r9
    bne     .LOP_FILLED_NEW_ARRAY_notimpl   @ no, not handled yet
    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
    cmp     r0, #0                      @ null return?
    beq     common_exceptionThrown      @ alloc failed, handle exception

    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
    str     r0, [rGLUE, #offGlue_retval]    @ retval.l <- new array
    add     r0, r0, #offArrayObject_contents    @ r0<- newArray->contents
    subs    r9, r9, #1                  @ length--, check for neg
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    bmi     2f                          @ was zero, bail

    @ copy values from registers into the array
    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
    .if     0                           @ range form copies a block of registers
    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
1:  ldr     r3, [r2], #4                @ r3<- *r2++
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .else                               @ non-range form walks the FEDC nibbles
    cmp     r9, #4                      @ length was initially 5?
    and     r2, r10, #15                @ r2<- A
    bne     1f                          @ <= 4 args, branch
    GET_VREG(r3, r2)                    @ r3<- vA
    sub     r9, r9, #1                  @ count--
    str     r3, [r0, #16]               @ contents[4] = vA
1:  and     r2, r1, #15                 @ r2<- F/E/D/C
    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .endif

2:
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

    /*
     * Throw an exception indicating that we have not implemented this
     * mode of filled-new-array.
     */
.LOP_FILLED_NEW_ARRAY_notimpl:
    ldr     r0, .L_strInternalError
    ldr     r1, .L_strFilledNewArrayNotImpl
    bl      dvmThrowException
    b       common_exceptionThrown

    .if     (!0)                        @ define in one or the other, not both
.L_strFilledNewArrayNotImpl:
    .word   .LstrFilledNewArrayNotImpl
.L_strInternalError:
    .word   .LstrInternalError
    .endif


/* continuation for OP_FILLED_NEW_ARRAY_RANGE */

    /*
     * On entry:
     *  r0 holds array class
     *  r10 holds AA or BA
     */
.LOP_FILLED_NEW_ARRAY_RANGE_continue:
    ldr     r3, [r0, #offClassObject_descriptor]    @ r3<- arrayClass->descriptor
    mov     r2, #ALLOC_DONT_TRACK       @ r2<- alloc flags
    ldrb    r3, [r3, #1]                @ r3<- descriptor[1]
    .if     1                           @ range form: length is AA
    mov     r1, r10                     @ r1<- AA (length)
    .else
    mov     r1, r10, lsr #4             @ r1<- B (length)
    .endif
    cmp     r3, #'I'                    @ array of ints?
    cmpne   r3, #'L'                    @ array of objects?
    cmpne   r3, #'['                    @ array of arrays?
    mov     r9, r1                      @ save length in r9
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet
    bl      dvmAllocArrayByClass        @ r0<- call(arClass, length, flags)
    cmp     r0, #0                      @ null return?
    beq     common_exceptionThrown      @ alloc failed, handle exception

    FETCH(r1, 2)                        @ r1<- FEDC or CCCC
    str     r0, [rGLUE, #offGlue_retval]    @ retval.l <- new array
    add     r0, r0, #offArrayObject_contents    @ r0<- newArray->contents
    subs    r9, r9, #1                  @ length--, check for neg
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    bmi     2f                          @ was zero, bail

    @ copy values from registers into the array
    @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
    .if     1                           @ range form copies a block of registers
    add     r2, rFP, r1, lsl #2         @ r2<- &fp[CCCC]
1:  ldr     r3, [r2], #4                @ r3<- *r2++
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .else
    cmp     r9, #4                      @ length was initially 5?
    and     r2, r10, #15                @ r2<- A
    bne     1f                          @ <= 4 args, branch
    GET_VREG(r3, r2)                    @ r3<- vA
    sub     r9, r9, #1                  @ count--
    str     r3, [r0, #16]               @ contents[4] = vA
1:  and     r2, r1, #15                 @ r2<- F/E/D/C
    GET_VREG(r3, r2)                    @ r3<- vF/vE/vD/vC
    mov     r1, r1, lsr #4              @ r1<- next reg in low 4
    subs    r9, r9, #1                  @ count--
    str     r3, [r0], #4                @ *contents++ = vX
    bpl     1b
    @ continue at 2
    .endif

2:
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

    /*
     * Throw an exception indicating that we have not implemented this
     * mode of filled-new-array.
     */
.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
    ldr     r0, .L_strInternalError
    ldr     r1, .L_strFilledNewArrayNotImpl
    bl      dvmThrowException
    b       common_exceptionThrown

    .if     (!1)                        @ define in one or the other, not both
.L_strFilledNewArrayNotImpl:
    .word   .LstrFilledNewArrayNotImpl
.L_strInternalError:
    .word   .LstrInternalError
    .endif


/* continuation for OP_CMPL_FLOAT */
.LOP_CMPL_FLOAT_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CMPG_FLOAT */
.LOP_CMPG_FLOAT_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CMPL_DOUBLE */
.LOP_CMPL_DOUBLE_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CMPG_DOUBLE */
.LOP_CMPG_DOUBLE_finish:
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_CMP_LONG */

.LOP_CMP_LONG_less:
    mvn     r1, #0                      @ r1<- -1
    @ Want to cond code the next mov so we can avoid branch, but don't see it;
    @ instead, we just replicate the tail end.
8047 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8048 SET_VREG(r1, r9) @ vAA<- r1 8049 GET_INST_OPCODE(ip) @ extract opcode from rINST 8050 GOTO_OPCODE(ip) @ jump to next instruction 8051 8052.LOP_CMP_LONG_greater: 8053 mov r1, #1 @ r1<- 1 8054 @ fall through to _finish 8055 8056.LOP_CMP_LONG_finish: 8057 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8058 SET_VREG(r1, r9) @ vAA<- r1 8059 GET_INST_OPCODE(ip) @ extract opcode from rINST 8060 GOTO_OPCODE(ip) @ jump to next instruction 8061 8062 8063/* continuation for OP_AGET_WIDE */ 8064 8065.LOP_AGET_WIDE_finish: 8066 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8067 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8068 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8069 GET_INST_OPCODE(ip) @ extract opcode from rINST 8070 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8071 GOTO_OPCODE(ip) @ jump to next instruction 8072 8073 8074/* continuation for OP_APUT_WIDE */ 8075 8076.LOP_APUT_WIDE_finish: 8077 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8078 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8079 GET_INST_OPCODE(ip) @ extract opcode from rINST 8080 strd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8081 GOTO_OPCODE(ip) @ jump to next instruction 8082 8083 8084/* continuation for OP_APUT_OBJECT */ 8085 /* 8086 * On entry: 8087 * r1 = vBB (arrayObj) 8088 * r9 = vAA (obj) 8089 * r10 = offset into array (vBB + vCC * width) 8090 */ 8091.LOP_APUT_OBJECT_finish: 8092 cmp r9, #0 @ storing null reference? 8093 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8094 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8095 ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz 8096 bl dvmCanPutArrayElement @ test object type vs. array type 8097 cmp r0, #0 @ okay? 
8098 beq common_errArrayStore @ no 8099.LOP_APUT_OBJECT_skip_check: 8100 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8101 GET_INST_OPCODE(ip) @ extract opcode from rINST 8102 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8103 GOTO_OPCODE(ip) @ jump to next instruction 8104 8105 8106/* continuation for OP_IGET */ 8107 8108 /* 8109 * Currently: 8110 * r0 holds resolved field 8111 * r9 holds object 8112 */ 8113.LOP_IGET_finish: 8114 @bl common_squeak0 8115 cmp r9, #0 @ check object for null 8116 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8117 beq common_errNullObject @ object was null 8118 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8119 ubfx r2, rINST, #8, #4 @ r2<- A 8120 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8121 GET_INST_OPCODE(ip) @ extract opcode from rINST 8122 SET_VREG(r0, r2) @ fp[A]<- r0 8123 GOTO_OPCODE(ip) @ jump to next instruction 8124 8125 8126/* continuation for OP_IGET_WIDE */ 8127 8128 /* 8129 * Currently: 8130 * r0 holds resolved field 8131 * r9 holds object 8132 */ 8133.LOP_IGET_WIDE_finish: 8134 cmp r9, #0 @ check object for null 8135 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8136 beq common_errNullObject @ object was null 8137 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 8138 ubfx r2, rINST, #8, #4 @ r2<- A 8139 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8140 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8141 GET_INST_OPCODE(ip) @ extract opcode from rINST 8142 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8143 GOTO_OPCODE(ip) @ jump to next instruction 8144 8145 8146/* continuation for OP_IGET_OBJECT */ 8147 8148 /* 8149 * Currently: 8150 * r0 holds resolved field 8151 * r9 holds object 8152 */ 8153.LOP_IGET_OBJECT_finish: 8154 @bl common_squeak0 8155 cmp r9, #0 @ check object for null 8156 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8157 beq common_errNullObject @ object was null 8158 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8159 mov r2, 
rINST, lsr #8 @ r2<- A+ 8160 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8161 and r2, r2, #15 @ r2<- A 8162 GET_INST_OPCODE(ip) @ extract opcode from rINST 8163 SET_VREG(r0, r2) @ fp[A]<- r0 8164 GOTO_OPCODE(ip) @ jump to next instruction 8165 8166 8167/* continuation for OP_IGET_BOOLEAN */ 8168 8169 /* 8170 * Currently: 8171 * r0 holds resolved field 8172 * r9 holds object 8173 */ 8174.LOP_IGET_BOOLEAN_finish: 8175 @bl common_squeak1 8176 cmp r9, #0 @ check object for null 8177 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8178 beq common_errNullObject @ object was null 8179 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8180 mov r2, rINST, lsr #8 @ r2<- A+ 8181 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8182 and r2, r2, #15 @ r2<- A 8183 GET_INST_OPCODE(ip) @ extract opcode from rINST 8184 SET_VREG(r0, r2) @ fp[A]<- r0 8185 GOTO_OPCODE(ip) @ jump to next instruction 8186 8187 8188/* continuation for OP_IGET_BYTE */ 8189 8190 /* 8191 * Currently: 8192 * r0 holds resolved field 8193 * r9 holds object 8194 */ 8195.LOP_IGET_BYTE_finish: 8196 @bl common_squeak2 8197 cmp r9, #0 @ check object for null 8198 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8199 beq common_errNullObject @ object was null 8200 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8201 mov r2, rINST, lsr #8 @ r2<- A+ 8202 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8203 and r2, r2, #15 @ r2<- A 8204 GET_INST_OPCODE(ip) @ extract opcode from rINST 8205 SET_VREG(r0, r2) @ fp[A]<- r0 8206 GOTO_OPCODE(ip) @ jump to next instruction 8207 8208 8209/* continuation for OP_IGET_CHAR */ 8210 8211 /* 8212 * Currently: 8213 * r0 holds resolved field 8214 * r9 holds object 8215 */ 8216.LOP_IGET_CHAR_finish: 8217 @bl common_squeak3 8218 cmp r9, #0 @ check object for null 8219 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8220 beq common_errNullObject @ object was null 8221 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8222 mov r2, 
rINST, lsr #8 @ r2<- A+ 8223 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8224 and r2, r2, #15 @ r2<- A 8225 GET_INST_OPCODE(ip) @ extract opcode from rINST 8226 SET_VREG(r0, r2) @ fp[A]<- r0 8227 GOTO_OPCODE(ip) @ jump to next instruction 8228 8229 8230/* continuation for OP_IGET_SHORT */ 8231 8232 /* 8233 * Currently: 8234 * r0 holds resolved field 8235 * r9 holds object 8236 */ 8237.LOP_IGET_SHORT_finish: 8238 @bl common_squeak4 8239 cmp r9, #0 @ check object for null 8240 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8241 beq common_errNullObject @ object was null 8242 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8243 mov r2, rINST, lsr #8 @ r2<- A+ 8244 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8245 and r2, r2, #15 @ r2<- A 8246 GET_INST_OPCODE(ip) @ extract opcode from rINST 8247 SET_VREG(r0, r2) @ fp[A]<- r0 8248 GOTO_OPCODE(ip) @ jump to next instruction 8249 8250 8251/* continuation for OP_IPUT */ 8252 8253 /* 8254 * Currently: 8255 * r0 holds resolved field 8256 * r9 holds object 8257 */ 8258.LOP_IPUT_finish: 8259 @bl common_squeak0 8260 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8261 ubfx r1, rINST, #8, #4 @ r1<- A 8262 cmp r9, #0 @ check object for null 8263 GET_VREG(r0, r1) @ r0<- fp[A] 8264 beq common_errNullObject @ object was null 8265 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8266 GET_INST_OPCODE(ip) @ extract opcode from rINST 8267 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8268 GOTO_OPCODE(ip) @ jump to next instruction 8269 8270 8271/* continuation for OP_IPUT_WIDE */ 8272 8273 /* 8274 * Currently: 8275 * r0 holds resolved field 8276 * r9 holds object 8277 */ 8278.LOP_IPUT_WIDE_finish: 8279 ubfx r2, rINST, #8, #4 @ r2<- A 8280 cmp r9, #0 @ check object for null 8281 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8282 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8283 beq common_errNullObject @ object was null 8284 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 
    @ (tail of .LOP_IPUT_WIDE_finish: r2 = &fp[A], r3 = field byte offset)
    ldmia   r2, {r0-r1}                 @ r0/r1<- fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strd    r0, [r9, r3]                @ obj.field (64 bits, aligned)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_OBJECT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_OBJECT_finish:
    @bl      common_squeak0
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_BOOLEAN */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_BOOLEAN_finish:
    @bl      common_squeak1
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_BYTE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_BYTE_finish:
    @bl      common_squeak2
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_CHAR */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_CHAR_finish:
    @bl      common_squeak3
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_IPUT_SHORT */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IPUT_SHORT_finish:
    @bl      common_squeak4
    mov     r1, rINST, lsr #8           @ r1<- A+
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    and     r1, r1, #15                 @ r1<- A
    cmp     r9, #0                      @ check object for null
    GET_VREG(r0, r1)                    @ r0<- fp[A]
    beq     common_errNullObject        @ object was null
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r0, [r9, r3]                @ obj.field (8/16/32 bits)<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SGET */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    @ (tail of .LOP_SGET_resolve: branch on resolution result)
    bne     .LOP_SGET_finish            @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_WIDE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_WIDE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_WIDE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_OBJECT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_OBJECT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_OBJECT_finish     @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_BOOLEAN */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_BOOLEAN_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_BOOLEAN_finish    @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_BYTE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_BYTE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_BYTE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_CHAR */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_CHAR_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_CHAR_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SGET_SHORT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SGET_SHORT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SGET_SHORT_finish      @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_finish            @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_WIDE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     *  r9: &fp[AA]
     */
.LOP_SPUT_WIDE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_WIDE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_OBJECT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_OBJECT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_OBJECT_finish     @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_BOOLEAN */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_BOOLEAN_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_BOOLEAN_finish    @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_BYTE */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_BYTE_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_BYTE_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_CHAR */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_CHAR_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_CHAR_finish       @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_SPUT_SHORT */

    /*
     * Continuation if the field has not yet been resolved.
     *  r1: BBBB field ref
     */
.LOP_SPUT_SHORT_resolve:
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw, so export now
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveStaticField       @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_SPUT_SHORT_finish      @ yes, finish
    b       common_exceptionThrown      @ no, handle exception


/* continuation for OP_INVOKE_VIRTUAL */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
     */
.LOP_INVOKE_VIRTUAL_continue:
    GET_VREG(r1, r10)                   @ r1<- "this" ptr
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    cmp     r1, #0                      @ is "this" null?
    @ (tail of .LOP_INVOKE_VIRTUAL_continue: virtual dispatch via vtable)
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodNoRange  @ continue on


/* continuation for OP_INVOKE_SUPER */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r9 = method->clazz
     */
.LOP_INVOKE_SUPER_continue:
    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_nsm       @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodNoRange  @ continue on

.LOP_INVOKE_SUPER_resolve:
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_continue  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     *  r0 = resolved base method
     */
.LOP_INVOKE_SUPER_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod


/* continuation for OP_INVOKE_DIRECT */

    /*
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_finish   @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* continuation for OP_INVOKE_VIRTUAL_RANGE */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r10 = C or CCCC (index of first arg, which is the "this" ptr)
     */
.LOP_INVOKE_VIRTUAL_RANGE_continue:
    GET_VREG(r1, r10)                   @ r1<- "this" ptr
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    cmp     r1, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r3, [r1, #offObject_clazz]  @ r3<- thisPtr->clazz
    ldr     r3, [r3, #offClassObject_vtable]    @ r3<- thisPtr->clazz->vtable
    ldr     r0, [r3, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange    @ continue on


/* continuation for OP_INVOKE_SUPER_RANGE */

    /*
     * At this point:
     *  r0 = resolved base method
     *  r9 = method->clazz
     */
.LOP_INVOKE_SUPER_RANGE_continue:
    ldr     r1, [r9, #offClassObject_super]     @ r1<- method->clazz->super
    ldrh    r2, [r0, #offMethod_methodIndex]    @ r2<- baseMethod->methodIndex
    ldr     r3, [r1, #offClassObject_vtableCount]   @ r3<- super->vtableCount
    EXPORT_PC()                         @ must export for invoke
    cmp     r2, r3                      @ compare (methodIndex, vtableCount)
    bcs     .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
    ldr     r1, [r1, #offClassObject_vtable]    @ r1<- ...clazz->super->vtable
    ldr     r0, [r1, r2, lsl #2]        @ r0<- vtable[methodIndex]
    bl      common_invokeMethodRange    @ continue on

.LOP_INVOKE_SUPER_RANGE_resolve:
    mov     r0, r9                      @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_SUPER_RANGE_continue    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

    /*
     * Throw a NoSuchMethodError with the method name as the message.
     *  r0 = resolved base method
     */
.LOP_INVOKE_SUPER_RANGE_nsm:
    ldr     r1, [r0, #offMethod_name]   @ r1<- method name
    b       common_errNoSuchMethod


/* continuation for OP_INVOKE_DIRECT_RANGE */

    /*
     * On entry:
     *  r1 = reference (BBBB or CCCC)
     *  r10 = "this" register
     */
.LOP_INVOKE_DIRECT_RANGE_resolve:
    ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_DIRECT          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    GET_VREG(r2, r10)                   @ r2<- "this" ptr (reload)
    bne     .LOP_INVOKE_DIRECT_RANGE_finish     @ no, continue
    b       common_exceptionThrown      @ yes, handle exception


/* continuation for OP_FLOAT_TO_LONG */
/*
 * Convert the float in r0 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
f2l_doconv:
    stmfd   sp!, {r4, lr}
    mov     r1, #0x5f000000             @ (float)maxlong (bit pattern of 2^63)
    mov     r4, r0
    bl      __aeabi_fcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
    mvnne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, #0xdf000000             @ (float)minlong
    bl      __aeabi_fcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (8000000000000000)
    movne   r1, #0x80000000
    ldmnefd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    mov     r1, r4
    bl      __aeabi_fcmpeq              @ is arg == self?
    cmp     r0, #0                      @ zero == no
    moveq   r1, #0                      @ return zero for NaN
    ldmeqfd sp!, {r4, pc}

    mov     r0, r4                      @ recover arg
    bl      __aeabi_f2lz                @ convert float to long
    ldmfd   sp!, {r4, pc}


/* continuation for OP_DOUBLE_TO_LONG */
/*
 * Convert the double in r0/r1 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification.  The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer.  The EABI convert function isn't doing this for us.
 */
d2l_doconv:
    stmfd   sp!, {r4, r5, lr}           @ save regs
    mov     r3, #0x43000000             @ maxlong, as a double (high word)
    add     r3, #0x00e00000             @  0x43e00000
    mov     r2, #0                      @ maxlong, as a double (low word)
    sub     sp, sp, #4                  @ align for EABI
    mov     r4, r0                      @ save a copy of r0
    mov     r5, r1                      @  and r1
    bl      __aeabi_dcmpge              @ is arg >= maxlong?
    cmp     r0, #0                      @ nonzero == yes
    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
    mvnne   r1, #0x80000000
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    mov     r3, #0xc3000000             @ minlong, as a double (high word)
    add     r3, #0x00e00000             @  0xc3e00000
    mov     r2, #0                      @ minlong, as a double (low word)
    bl      __aeabi_dcmple              @ is arg <= minlong?
    cmp     r0, #0                      @ nonzero == yes
    movne   r0, #0                      @ return minlong (8000000000000000)
    movne   r1, #0x80000000
    bne     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    mov     r2, r4                      @ compare against self
    mov     r3, r5
    bl      __aeabi_dcmpeq              @ is arg == self?
    @ (tail of d2l_doconv: NaN check result, then the actual conversion)
    cmp     r0, #0                      @ zero == no
    moveq   r1, #0                      @ return zero for NaN
    beq     1f

    mov     r0, r4                      @ recover arg
    mov     r1, r5
    bl      __aeabi_d2lz                @ convert double to long

1:
    add     sp, sp, #4
    ldmfd   sp!, {r4, r5, pc}


/* continuation for OP_MUL_LONG */

.LOP_MUL_LONG_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHL_LONG */

.LOP_SHL_LONG_finish:
    mov     r0, r0, asl r2              @  r0<- r0 << r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHR_LONG */

.LOP_SHR_LONG_finish:
    mov     r1, r1, asr r2              @  r1<- r1 >> r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_USHR_LONG */

.LOP_USHR_LONG_finish:
    mov     r1, r1, lsr r2              @  r1<- r1 >>> r2
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHL_LONG_2ADDR */

.LOP_SHL_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_SHR_LONG_2ADDR */

.LOP_SHR_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_USHR_LONG_2ADDR */

.LOP_USHR_LONG_2ADDR_finish:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* continuation for OP_EXECUTE_INLINE */

    /*
     * Extract args, call function.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     *
     * Other ideas:
     * - Use a jump table from the main piece to jump directly into the
     *   AND/LDR pairs.  Costs a data load, saves a branch.
     * - Have five separate pieces that do the loading, so we can work the
     *   interleave a little better.  Increases code size.
     */
.LOP_EXECUTE_INLINE_continue:
    @ Computed goto: each numbered slot below MUST stay exactly 2 instrs,
    @ since "add pc" skips (4-argc) slots of 8 bytes each.
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- FEDC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  and     ip, r9, #0xf000             @ isolate F
    ldr     r3, [rFP, ip, lsr #10]      @ r3<- vF (shift right 12, left 2)
3:  and     ip, r9, #0x0f00             @ isolate E
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vE
2:  and     ip, r9, #0x00f0             @ isolate D
    ldr     r1, [rFP, ip, lsr #2]       @ r1<- vD
1:  and     ip, r9, #0x000f             @ isolate C
    ldr     r0, [rFP, ip, lsl #2]       @ r0<- vC
0:
    ldr     r9, .LOP_EXECUTE_INLINE_table       @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_table:
    .word   gDvmInlineOpsTable


/* continuation for OP_EXECUTE_INLINE_RANGE */

    /*
     * Extract args, call function.
     *  r0 = #of args (0-4)
     *  r10 = call index
     *  lr = return addr, above  [DO NOT bl out of here w/o preserving LR]
     */
.LOP_EXECUTE_INLINE_RANGE_continue:
    @ Computed goto: each numbered slot MUST stay exactly 2 instructions.
    rsb     r0, r0, #4                  @ r0<- 4-r0
    FETCH(r9, 2)                        @ r9<- CCCC
    add     pc, pc, r0, lsl #3          @ computed goto, 2 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
4:  add     ip, r9, #3                  @ base+3
    GET_VREG(r3, ip)                    @ r3<- vBase[3]
3:  add     ip, r9, #2                  @ base+2
    GET_VREG(r2, ip)                    @ r2<- vBase[2]
2:  add     ip, r9, #1                  @ base+1
    GET_VREG(r1, ip)                    @ r1<- vBase[1]
1:  add     ip, r9, #0                  @ (nop)
    GET_VREG(r0, ip)                    @ r0<- vBase[0]
0:
    ldr     r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
    LDR_PC  "[r9, r10, lsl #4]"         @ sizeof=16, "func" is first entry
    @ (not reached)

.LOP_EXECUTE_INLINE_RANGE_table:
    .word   gDvmInlineOpsTable


    .size   dvmAsmSisterStart, .-dvmAsmSisterStart
    .global dvmAsmSisterEnd
dvmAsmSisterEnd:

/* File: armv5te/footer.S */

/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */




    .text
    .align  2

#if defined(WITH_JIT)
#if defined(WITH_SELF_VERIFICATION)
    /* Self-verification builds: every JIT exit funnels through
     * dvmJitSelfVerificationEnd with a reason code in r2. */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
    b      dvmJitSelfVerificationEnd    @ doesn't return

    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
    b      dvmJitSelfVerificationEnd    @ doesn't return

    .global dvmJitToTraceSelect
dvmJitToTraceSelect:
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
    b      dvmJitSelfVerificationEnd    @ doesn't return

    .global dvmJitToBackwardBranch
dvmJitToBackwardBranch:
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
    b      dvmJitSelfVerificationEnd    @ doesn't return

    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSNormal               @ r2<- interpreter entry point
    b      dvmJitSelfVerificationEnd    @ doesn't return

    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
    b      dvmJitSelfVerificationEnd    @ doesn't return
#else
/*
 * Return from the translation cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction. We have to skip
 * the code cache lookup otherwise it is possible to indefinitely bounce
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    rPC, r0
#ifdef EXIT_STATS
    mov    r0,lr
    bl     dvmBumpPunt;
#endif
    EXPORT_PC()
    mov    r0, #0
    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *    r0 <= PC
 *    r1 <= PC of resume instruction
 *    lr <= resume point in translation
 */
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResume]
    str    r1,[rGLUE,#offGlue_jitResumePC]
    mov    r1,#kInterpEntryInstr
    @ enum is 4 byte in aapcs-EABI
    str    r1, [rGLUE, #offGlue_entryPoint]
    mov    rPC,r0
    EXPORT_PC()

    adrl   rIBASE, dvmAsmInstructionStart
    mov    r2,#kJitSingleStep           @ Ask for single step and then revert
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r1,#1                        @ set changeInterp to bail to debug interp
    b      common_gotoBail

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.
 */
    .global dvmJitToTraceSelect
dvmJitToTraceSelect:
    ldr    rPC,[lr, #-1]                @ get our target PC
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5                 @ save start of chain branch
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    2f
    mov    r1,rINST
    bl     dvmJitChain                  @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                        @ successful chain?
    bxne   r0                           @ continue native execution
    b      toInterpreter                @ didn't chain - resume with interpreter

/* No translation, so request one if profiling isn't disabled*/
2:
    adrl   rIBASE, dvmAsmInstructionStart
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp    r0, #0
    bne    common_selectTrace
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return from the translation cache to the interpreter.
 * The return was done with a BLX from thumb mode, and
 * the following 32-bit word contains the target rPC value.
 * Note that lr (r14) will have its low-order bit set to denote
 * its thumb-mode origin.
 *
 * We'll need to stash our lr origin away, recover the new
 * target and then check to see if there is a translation available
 * for our new target.  If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    rPC,[lr, #-1]                @ get our target PC
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5                 @ save start of chain branch
#ifdef EXIT_STATS
    bl     dvmBumpNormal
#endif
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    toInterpreter                @ go if not, otherwise do chain
    mov    r1,rINST
    bl     dvmJitChain                  @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                      @ arg1 of translation may need this
    mov    lr, #0                       @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                        @ successful chain?
    bxne   r0                           @ continue native execution
    b      toInterpreter                @ didn't chain - resume with interpreter

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
#ifdef EXIT_STATS
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr            @ Is there a translation?
9157 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9158 mov r1, rPC @ arg1 of translation may need this 9159 mov lr, #0 @ in case target is HANDLER_INTERPRET 9160 cmp r0,#0 9161 bxne r0 @ continue native execution if so 9162#endif 9163 9164/* 9165 * No translation, restore interpreter regs and start interpreting. 9166 * rGLUE & rFP were preserved in the translated code, and rPC has 9167 * already been restored by the time we get here. We'll need to set 9168 * up rIBASE & rINST, and load the address of the JitTable into r0. 9169 */ 9170toInterpreter: 9171 EXPORT_PC() 9172 adrl rIBASE, dvmAsmInstructionStart 9173 FETCH_INST() 9174 GET_JIT_PROF_TABLE(r0) 9175 @ NOTE: intended fallthrough 9176/* 9177 * Common code to update potential trace start counter, and initiate 9178 * a trace-build if appropriate. On entry, rPC should point to the 9179 * next instruction to execute, and rINST should be already loaded with 9180 * the next opcode word, and r0 holds a pointer to the jit profile 9181 * table (pJitProfTable). 9182 */ 9183common_testUpdateProfile: 9184 cmp r0,#0 9185 GET_INST_OPCODE(ip) 9186 GOTO_OPCODE_IFEQ(ip) @ if not profiling, fallthrough otherwise */ 9187 9188common_updateProfile: 9189 eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function 9190 lsl r3,r3,#21 @ shift out excess 2047 9191 ldrb r1,[r0,r3,lsr #21] @ get counter 9192 GET_INST_OPCODE(ip) 9193 subs r1,r1,#1 @ decrement counter 9194 strb r1,[r0,r3,lsr #21] @ and store it 9195 GOTO_OPCODE_IFNE(ip) @ if not threshold, fallthrough otherwise */ 9196 9197/* 9198 * Here, we switch to the debug interpreter to request 9199 * trace selection. First, though, check to see if there 9200 * is already a native translation in place (and, if so, 9201 * jump to it now). 
 */
    GET_JIT_THRESHOLD(r1)
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    strb    r1,[r0,r3,lsr #21]          @ reset counter to threshold
    EXPORT_PC()
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @ in case target is HANDLER_INTERPRET
    cmp     r0,#0
#if !defined(WITH_SELF_VERIFICATION)
    bxne    r0                          @ jump to the translation
#else
    beq     common_selectTrace
    /*
     * At this point, we have a target translation.  However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    mov     r10, r0                     @ save target
    bl      dvmCompilerGetInterpretTemplate
    cmp     r0, r10                     @ special case?
    bne     dvmJitSelfVerificationStart @ set up self verification
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
    /* no return */
#endif

/*
 * Ask the debug interpreter to begin trace selection, then bail out
 * so the interpreter switch can happen (r1=1 means "change interp").
 */
common_selectTrace:
    mov     r2,#kJitTSelectRequest      @ ask for trace selection
    str     r2,[rGLUE,#offGlue_jitState]
    mov     r2,#kInterpEntryInstr       @ normal entry reason
    str     r2,[rGLUE,#offGlue_entryPoint]
    mov     r1,#1                       @ set changeInterp
    b       common_gotoBail

#if defined(WITH_SELF_VERIFICATION)
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
 * On entry, r10 contains the address of the target translation.
 */
dvmJitSelfVerificationStart:
    mov     r0,rPC                      @ r0<- program counter
    mov     r1,rFP                      @ r1<- frame pointer
    mov     r2,rGLUE                    @ r2<- InterpState pointer
    mov     r3,r10                      @ r3<- target translation
    bl      dvmSelfVerificationSaveState @ save registers to shadow space
    ldr     rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
    add     rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
    bx      r10                         @ jump to the translation

/*
 * Restore PC, registers, and interpState to original values
 * before jumping back to the interpreter.
 */
dvmJitSelfVerificationEnd:
    mov    r1,rFP                       @ pass ending fp
    bl     dvmSelfVerificationRestoreState @ restore pc and fp values
    ldr    rPC,[r0,#offShadowSpace_startPC] @ restore PC
    ldr    rFP,[r0,#offShadowSpace_fp]  @ restore FP
    ldr    rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
    ldr    r1,[r0,#offShadowSpace_svState] @ get self verification state
    cmp    r1,#0                        @ check for punt condition
    beq    1f
    mov    r2,#kJitSelfVerification     @ ask for self verification
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r2,#kInterpEntryInstr        @ normal entry reason
    str    r2,[rGLUE,#offGlue_entryPoint]
    mov    r1,#1                        @ set changeInterp
    b      common_gotoBail

1:                                      @ exit to interpreter without check
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#endif

#endif

/*
 * Common code when a backward branch is taken.
 *
 * On entry:
 *  r9 is PC adjustment *in bytes*
 */
common_backwardBranch:
    mov     r0, #kInterpEntryInstr
    bl      common_periodicChecks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile        @ profiling on: bump trace counter
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/*
 * Need to see if the thread needs to be suspended or debugger/profiler
 * activity has begun.
 *
 * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't
 * have to do the second ldr.
 *
 * TODO: reduce this so we're just checking a single location.
 *
 * On entry:
 *  r0 is reentry type, e.g. kInterpEntryInstr
 *  r9 is trampoline PC adjustment *in bytes*
 */
common_periodicChecks:
    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount

    @ speculatively store r0 before it is clobbered by dvmCheckSuspendPending
    str     r0, [rGLUE, #offGlue_entryPoint]

#if defined(WITH_DEBUGGER)
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
#endif
#if defined(WITH_PROFILER)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
#endif

    ldr     r3, [r3]                    @ r3<- suspendCount (int)

#if defined(WITH_DEBUGGER)
    ldrb    r1, [r1]                    @ r1<- debuggerActive (boolean)
#endif
#if defined (WITH_PROFILER)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
#endif

    cmp     r3, #0                      @ suspend pending?
    bne     2f                          @ yes, do full suspension check

#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER)
# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER)
    orrs    r1, r1, r2                  @ r1<- r1 | r2
    cmp     r1, #0                      @ debugger attached or profiler started?
                                        @ (orrs already set Z; cmp is redundant
                                        @ but harmless in this generated code)
# elif defined(WITH_DEBUGGER)
    cmp     r1, #0                      @ debugger attached?
# elif defined(WITH_PROFILER)
    cmp     r2, #0                      @ profiler started?
# endif
    bne     3f                          @ debugger/profiler, switch interp
#endif

    bx      lr                          @ nothing to do, return

2:  @ check suspend
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ need for precise GC
    b       dvmCheckSuspendPending      @ suspend if necessary, then return

3:  @ debugger/profiler enabled, bail out
    add     rPC, rPC, r9                @ update rPC
    mov     r1, #1                      @ "want switch" = true
    b       common_gotoBail


/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 *
 * State registers will be saved to the "glue" area before bailing.
 *
 * On entry:
 *  r1 is "bool changeInterp", indicating if we want to switch to the
 *     other interpreter or just bail all the way out
 */
common_gotoBail:
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r0, rGLUE                   @ r0<- glue ptr
    b       dvmMterpStdBail             @ call(glue, changeInterp)

    @add    r1, r1, #1                  @ using (boolean+1)
    @add    r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
    @bl     _longjmp                    @ does not return
    @bl     common_abort


/*
 * Common code for method invocation with range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodRange:
.LinvokeNewRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC (first arg register)

    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
    @ (very few methods have > 10 args; could unroll for common cases)
    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
1:  ldr     r1, [r3], #4                @ val = *fp++
    subs    r2, r2, #1                  @ count--
    str     r1, [r10], #4               @ *outs++ = val
    bne     1b                          @ ...while count != 0
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    b       .LinvokeArgsDone

/*
 * Common code for method invocation without range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    beq     .LinvokeArgsDone

    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
.LinvokeNonRange:
    @ Jump into the middle of the 5..1 copy sequence: each case is exactly
    @ 4 instructions (16 bytes), padded with a nop where needed.
    rsb     r2, r2, #5                  @ r2<- 5-r2
    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
5:  and     ip, rINST, #0x0f00          @ isolate A
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vA (shift right 8, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vA
4:  and     ip, r1, #0xf000             @ isolate G
    ldr     r2, [rFP, ip, lsr #10]      @ r2<- vG (shift right 12, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vG
3:  and     ip, r1, #0x0f00             @ isolate F
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vF
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vF
2:  and     ip, r1, #0x00f0             @ isolate E
    ldr     r2, [rFP, ip, lsr #2]       @ r2<- vE
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vE
1:  and     ip, r1, #0x000f             @ isolate D
    ldr     r2, [rFP, ip, lsl #2]       @ r2<- vD
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]   @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
@    bl      common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                      @ bottom < interpStackEnd?
    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
    blt     .LstackOverflow             @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE
    bne     .LinvokeNative

    /*
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                    @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                     @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]  @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    mov     rFP, r1                     @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)       @ extract prefetched opcode from r9
    mov     rINST, r9                   @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    cmp     r0,#0
    bne     common_updateProfile
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    mov     rFP, r1                     @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)       @ extract prefetched opcode from r9
    mov     rINST, r9                   @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
    mov     r9, r3                      @ r9<- glue->self (preserve)

    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFp (points to args)
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type   dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

    @mov     lr, pc                     @ set return addr
    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    LDR_PC_LR "[r2, #offMethod_nativeFunc]"

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
    ldr     r1, [r9, #offThread_exception] @ check for exception
    str     rFP, [r9, #offThread_curFrame] @ self->curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
    bne     common_exceptionThrown      @ no, handle exception

    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LstackOverflow:    @ r0=methodToCall
    mov     r1, r0                      @ r1<- methodToCall
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
    bl      dvmHandleStackOverflow
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
#endif


    /*
     * Common code for method invocation, calling through "glue code".
     *
     * TODO: now that we have range and non-range invoke handlers, this
     *       needs to be split into two.  Maybe just create entry points
     *       that set r9 and jump here?
     *
     * On entry:
     *  r0 is "Method* methodToCall", the method we're trying to call
     *  r9 is "bool methodCallRange", indicating if this is a /range variant
     */
    .if     0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rGLUE                   @ A0<- glue
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
    .endif



/*
 * Common code for handling a return instruction.
 *
 * This does not return.
 */
common_returnFromMethod:
.LreturnNew:
    mov     r0, #kInterpEntryReturn
    mov     r9, #0
    bl      common_periodicChecks

    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ is this a break frame?
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    mov     r1, #0                      @ "want switch" = false
    beq     common_gotoBail             @ break frame, bail out completely

    PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
    ldr     r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
#if defined(WITH_JIT)
    ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
    GET_JIT_PROF_TABLE(r0)
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    str     r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land
    cmp     r10, #0                     @ caller is compiled code
    blxne   r10
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     r0,#0
    bne     common_updateProfile
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /*
     * Return handling, calls through "glue code".
     */
    .if     0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif


/*
 * Somebody has thrown an exception.
 * Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
    .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow
    mov     r9, #0
    bl      common_periodicChecks

#if defined(WITH_JIT)
    mov     r2,#kJitTSelectAbort        @ abandon trace selection in progress
    str     r2,[rGLUE,#offGlue_jitState]
#endif

    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10                     @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    mov     r3, #0                      @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false
    str     ip, [sp, #-4]!              @ *--sp = &fp
    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
    mov     r0, r10                     @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ save relPc result in rFP
    mov     r0, r10                     @ r0<- self
    bl      dvmCleanupStackOverflow     @ call(self)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack
    bmi     .LnotCaughtLocally

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]  @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]  @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1         @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, r10                     @ if yes: r0<- self
    blne    dvmCleanupStackOverflow     @ if yes: call(self)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...)
     */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1                  @ byte offset -> code-unit offset
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!              @ push line number (varargs slot)
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!              @ push source file (varargs slot)
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
#endif
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    mov     r1, #0                      @ "want switch" = false
    b       common_gotoBail             @ bail out


    /*
     * Exception handling, calls through "glue code".
     */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Invalid array index.
 */
common_errArrayIndex:
    EXPORT_PC()
    ldr     r0, strArrayIndexException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invalid array value.
 */
common_errArrayStore:
    EXPORT_PC()
    ldr     r0, strArrayStoreException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Integer divide or mod by zero.
 */
common_errDivideByZero:
    EXPORT_PC()
    ldr     r0, strArithmeticException
    ldr     r1, strDivideByZero
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Attempt to allocate an array with a negative size.
 */
common_errNegativeArraySize:
    EXPORT_PC()
    ldr     r0, strNegativeArraySizeException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invocation of a non-existent method.
 */
common_errNoSuchMethod:
    EXPORT_PC()
    ldr     r0, strNoSuchMethodError
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * We encountered a null object when we weren't expecting one.  We
 * export the PC, throw a NullPointerException, and goto the exception
 * processing code.
 */
common_errNullObject:
    EXPORT_PC()
    ldr     r0, strNullPointerException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * For debugging, cause an immediate fault.  The source address will
 * be in lr (use a bl instruction to jump here).
 */
common_abort:
    ldr     pc, .LdeadFood
.LdeadFood:
    .word   0xdeadf00d

/*
 * Spit out a "we were here", preserving all registers.  (The attempt
 * to save ip won't work, but we need to save an even number of
 * registers for EABI 64-bit stack alignment.)
 */
    .macro  SQUEAK num
common_squeak\num:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strSqueak
    mov     r1, #\num
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endm

    SQUEAK  0
    SQUEAK  1
    SQUEAK  2
    SQUEAK  3
    SQUEAK  4
    SQUEAK  5

/*
 * Spit out the number in r0, preserving registers.
 */
common_printNum:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strSqueak               @ "<%d>" format
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print a newline, preserving registers.
 */
common_printNewline:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strNewline
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

    /*
     * Print the 32-bit quantity in r0 as a hex value, preserving registers.
     */
common_printHex:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strPrintHex             @ "<0x%x>" format
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print the 64-bit quantity in r0-r1, preserving registers.
 */
common_printLong:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r3, r1                      @ high word -> second vararg slot
    mov     r2, r0                      @ low word -> first vararg slot
    ldr     r0, strPrintLong            @ "<%lld>" format
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print full method info.  Pass the Method* in r0.  Preserves regs.
 */
common_printMethod:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpPrintMethod
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Call a C helper function that dumps regs and possibly some
 * additional info.  Requires the C function to be compiled in.
 */
    .if     0
common_dumpRegs:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpDumpArmRegs
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endif

#if 0
/*
 * Experiment on VFP mode.
 *
 * uint32_t setFPSCR(uint32_t val, uint32_t mask)
 *
 * Updates the bits specified by "mask", setting them to the values in "val".
 */
setFPSCR:
    and     r0, r0, r1                  @ make sure no stray bits are set
    fmrx    r2, fpscr                   @ get VFP reg
    mvn     r1, r1                      @ bit-invert mask
    and     r2, r2, r1                  @ clear masked bits
    orr     r2, r2, r0                  @ set specified bits
    fmxr    fpscr, r2                   @ set VFP reg
    mov     r0, r2                      @ return new value
    bx      lr

    .align  2
    .global dvmConfigureFP
    .type   dvmConfigureFP, %function
dvmConfigureFP:
    stmfd   sp!, {ip, lr}
    /* 0x03000000 sets DN/FZ */
    /* 0x00009f00 clears the six exception enable flags */
    bl      common_squeak0
    mov     r0, #0x03000000             @ r0<- 0x03000000
    add     r1, r0, #0x9f00             @ r1<- 0x03009f00
    bl      setFPSCR
    ldmfd   sp!, {ip, pc}
#endif


/*
 * String references, must be close to the code that uses them.
 * (These are literal-pool words holding addresses of the .rodata
 * strings below, loaded with PC-relative ldr.)
 */
    .align  2
strArithmeticException:
    .word   .LstrArithmeticException
strArrayIndexException:
    .word   .LstrArrayIndexException
strArrayStoreException:
    .word   .LstrArrayStoreException
strDivideByZero:
    .word   .LstrDivideByZero
strNegativeArraySizeException:
    .word   .LstrNegativeArraySizeException
strNoSuchMethodError:
    .word   .LstrNoSuchMethodError
strNullPointerException:
    .word   .LstrNullPointerException

strLogTag:
    .word   .LstrLogTag
strExceptionNotCaughtLocally:
    .word   .LstrExceptionNotCaughtLocally

strNewline:
    .word   .LstrNewline
strSqueak:
    .word   .LstrSqueak
strPrintHex:
    .word   .LstrPrintHex
strPrintLong:
    .word   .LstrPrintLong

/*
 * Zero-terminated ASCII string data.
 *
 * On ARM we have two choices: do like gcc does, and LDR from a .word
 * with the address, or use an ADR pseudo-op to get the address
 * directly.  ADR saves 4 bytes and an indirection, but it's using a
 * PC-relative addressing mode and hence has a limited range, which
 * makes it not work well with mergeable string sections.
 */
    .section .rodata.str1.4,"aMS",%progbits,1

.LstrBadEntryPoint:
    .asciz  "Bad entry point %d\n"
.LstrArithmeticException:
    .asciz  "Ljava/lang/ArithmeticException;"
.LstrArrayIndexException:
    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
.LstrArrayStoreException:
    .asciz  "Ljava/lang/ArrayStoreException;"
.LstrClassCastException:
    .asciz  "Ljava/lang/ClassCastException;"
.LstrDivideByZero:
    .asciz  "divide by zero"
.LstrFilledNewArrayNotImpl:
    .asciz  "filled-new-array only implemented for objects and 'int'"
.LstrInternalError:
    .asciz  "Ljava/lang/InternalError;"
.LstrInstantiationError:
    .asciz  "Ljava/lang/InstantiationError;"
.LstrNegativeArraySizeException:
    .asciz  "Ljava/lang/NegativeArraySizeException;"
.LstrNoSuchMethodError:
    .asciz  "Ljava/lang/NoSuchMethodError;"
.LstrNullPointerException:
    .asciz  "Ljava/lang/NullPointerException;"

.LstrLogTag:
    .asciz  "mterp"
.LstrExceptionNotCaughtLocally:
    .asciz  "Exception %s from %s:%d not caught locally\n"

.LstrNewline:
    .asciz  "\n"
.LstrSqueak:
    .asciz  "<%d>"
.LstrPrintHex:
    .asciz  "<0x%x>"
.LstrPrintLong:
    .asciz  "<%lld>"