InterpAsm-armv7-a.S revision b1d8044ee3a7503b94eb54459f3077d7200cd675
/*
 * This file was generated automatically by gen-mterp.py for 'armv7-a'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-a3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
/* NOTE: the ldmia/stmia forms rely on pc/fp being the first two Glue fields */
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 */
/* pre-indexed addressing with writeback ("!") advances rPC in one insn */
#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]!

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
    ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #2]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb     _reg, [rPC, #(_count*2+_byte)]

/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 *
 * The "lsl #6" means each handler occupies a 64-byte slot; see the
 * ".balign 64" before every .L_OP_* label below.
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

#if defined(WITH_JIT)
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
        add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"


/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm


/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.
 * From this it figures out how to restore
 * the registers, including PC, for the previous stack frame.  If gdb
 * sees a non-function label, it stops scanning, so either we need to
 * have nothing but assembler-local labels between the entry point and
 * the break, or we need to fake it out.
 *
 * When this is defined, we add some stuff to make gdb less confused.
 */
#define ASSIST_DEBUGGER 1

    .text
    .align  2
    .global dvmMterpStdRun
    .type   dvmMterpStdRun, %function

/*
 * On entry:
 *  r0  MterpGlue* glue
 *
 * This function returns a boolean "changeInterp" value.  The return comes
 * via a call to dvmMterpStdBail().
 */
dvmMterpStdRun:
/* Prologue macros, shared with the fake dalvik_inst frame below. */
#define MTERP_ENTRY1 \
    .save {r4-r10,fp,lr}; \
    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
#define MTERP_ENTRY2 \
    .pad    #4; \
    sub     sp, sp, #4                  @ align 64

    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2

    /* save stack pointer, add magic word for debuggerd */
    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return

    /* set up "named" registers, figure out entry point */
    mov     rGLUE, r0                   @ set rGLUE
    ldrb    r1, [r0, #offGlue_entryPoint]   @ InterpEntry enum is char
    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
    cmp     r1, #kInterpEntryInstr      @ usual case?
    bne     .Lnot_instr                 @ no, handle it

#if defined(WITH_JIT)
.Lno_singleStep:
    /* Entry is always a possible trace start */
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp     r0,#0                       @ profiling enabled (non-null table)?
    bne     common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    /* start executing the instruction at rPC */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.Lnot_instr:
    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
    beq     common_returnFromMethod

.Lnot_return:
    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
    beq     common_exceptionThrown

#if defined(WITH_JIT)
.Lnot_throw:
    ldr     r0,[rGLUE, #offGlue_jitResume]
    ldr     r2,[rGLUE, #offGlue_jitResumePC]
    cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
    bne     .Lbad_arg
    cmp     rPC,r2
    bne     .Lno_singleStep             @ must have branched, don't resume
    mov     r1, #kInterpEntryInstr
    strb    r1, [rGLUE, #offGlue_entryPoint]
    ldr     rINST, .LdvmCompilerTemplate
    bx      r0                          @ re-enter the translation
.LdvmCompilerTemplate:
    .word   dvmCompilerTemplateStart
#endif

.Lbad_arg:
    ldr     r0, strBadEntryPoint        @ r0<- format string
    @ r1 holds value of entryPoint
    bl      printf
    bl      dvmAbort                    @ does not return
    .fnend


    .global dvmMterpStdBail
    .type   dvmMterpStdBail, %function

/*
 * Restore the stack pointer and PC from the save point established on entry.
 * This is essentially the same as a longjmp, but should be cheaper.  The
 * last instruction causes us to return to whoever called dvmMterpStdRun.
 *
 * We pushed some registers on the stack in dvmMterpStdRun, then saved
 * SP and LR.  Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  r0  MterpGlue* glue
 *  r1  bool changeInterp
 */
dvmMterpStdBail:
    ldr     sp, [r0, #offGlue_bailPtr]  @ sp<- saved SP
    mov     r0, r1                      @ return the changeInterp value
    add     sp, sp, #4                  @ un-align 64
    LDMFD_PC "r4-r10,fp"                @ restore 9 regs and return


/*
 * String references.
 */
strBadEntryPoint:
    .word   .LstrBadEntryPoint


/*
 * Opcode handler table.  Each handler below sits in a 64-byte slot
 * (".balign 64"); GOTO_OPCODE dispatches via pc = rIBASE + opcode*64.
 */
    .global dvmAsmInstructionStart
    .type   dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
    .text

/* ------------------------------ */
    .balign 64
.L_OP_NOP: /* 0x00 */
/* File: armv5te/OP_NOP.S */
    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    .type   dalvik_inst, %function
dalvik_inst:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
    .fnend
#endif

/* ------------------------------ */
    .balign 64
.L_OP_MOVE: /* 0x01 */
/* File: armv6t2/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    ubfx    r0, rINST, #8, #4           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_FROM16: /* 0x02 */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_16: /* 0x03 */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE: /* 0x04 */
/* File: armv6t2/OP_MOVE_WIDE.S */
    /* move-wide vA, vB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r2, rINST, #8, #4           @ r2<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
    /* move-wide/from16 vAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 1)                        @ r3<- BBBB
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_16: /* 0x06 */
/* File: armv5te/OP_MOVE_WIDE_16.S */
    /* move-wide/16 vAAAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 2)                        @ r3<- BBBB
    FETCH(r2, 1)                        @ r2<- AAAA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AAAA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT: /* 0x07 */
/* File: armv5te/OP_MOVE_OBJECT.S */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15                 @ r0<- A (mask off high nibble)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_16: /* 0x09 */
/* File: armv5te/OP_MOVE_OBJECT_16.S */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT: /* 0x0a */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
    /* move-result-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_EXCEPTION: /* 0x0d */
/* File: armv5te/OP_MOVE_EXCEPTION.S */
    /* move-exception vAA */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    mov     r2, rINST, lsr #8           @ r2<- AA
    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
    mov     r1, #0                      @ r1<- 0
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_VOID: /* 0x0e */
/* File: armv5te/OP_RETURN_VOID.S */
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN: /* 0x0f */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_WIDE: /* 0x10 */
/* File: armv5te/OP_RETURN_WIDE.S */
    /*
     * Return a 64-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     */
    /* return-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
    stmia   r3, {r0-r1}                 @ retval<- r0/r1
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_OBJECT: /* 0x11 */
/* File: armv5te/OP_RETURN_OBJECT.S */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval] @ retval.i <- vAA
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_CONST_4: /* 0x12 */
/* File: armv6t2/OP_CONST_4.S */
    /* const/4 vA, #+B */
    mov     r1, rINST, lsl #16          @ r1<- Bxxx0000
    ubfx    r0, rINST, #8, #4           @ r0<- A
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r1, asr #28             @ r1<- sssssssB (sign-extended)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r1, r0)                    @ fp[A]<- r1
    GOTO_OPCODE(ip)                     @ execute next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_16: /* 0x13 */
/* File: armv5te/OP_CONST_16.S */
    /* const/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST: /* 0x14 */
/* File: armv5te/OP_CONST.S */
    /* const vAA, #+BBBBbbbb */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_HIGH16: /* 0x15 */
/* File: armv5te/OP_CONST_HIGH16.S */
    /* const/high16 vAA, #+BBBB0000 */
    FETCH(r0, 1)                        @ r0<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, r0, lsl #16             @ r0<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_16: /* 0x16 */
/* File: armv5te/OP_CONST_WIDE_16.S */
    /* const-wide/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r1, r0, asr #31             @ r1<- ssssssss
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_32: /* 0x17 */
/* File: armv5te/OP_CONST_WIDE_32.S */
    /* const-wide/32 vAA, #+BBBBbbbb */
    FETCH(r0, 1)                        @ r0<- 0000bbbb (low)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_S(r2, 2)                      @ r2<- ssssBBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    mov     r1, r0, asr #31             @ r1<- ssssssss
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE: /* 0x18 */
/* File: armv5te/OP_CONST_WIDE.S */
    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (low middle)
    FETCH(r2, 3)                        @ r2<- hhhh (high middle)
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
    FETCH(r3, 4)                        @ r3<- HHHH (high)
    mov     r9, rINST, lsr #8           @ r9<- AA
    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------
 */
    .balign 64
.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
    /* const-wide/high16 vAA, #+BBBB000000000000 */
    FETCH(r1, 1)                        @ r1<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, #0                      @ r0<- 00000000
    mov     r1, r1, lsl #16             @ r1<- BBBB0000 (high word)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING: /* 0x1a */
/* File: armv5te/OP_CONST_STRING.S */
    /* const/string vAA, String@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_resolve
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING_JUMBO: /* 0x1b */
/* File: armv5te/OP_CONST_STRING_JUMBO.S */
    /* const/string vAA, String@BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_JUMBO_resolve
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_CLASS: /* 0x1c */
/* File: armv5te/OP_CONST_CLASS.S */
    /* const/class vAA, Class@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- dvmDex->pResClasses
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_CLASS_resolve
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_ENTER: /* 0x1d */
/* File: armv5te/OP_MONITOR_ENTER.S */
    /*
     * Synchronize on an object.
     */
    /* monitor-enter vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
    beq     common_errNullObject        @ null object, throw an exception
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      dvmLockObject               @ call(self, obj)
#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r1, [r0, #offThread_exception]  @ check for exception
    cmp     r1, #0
    bne     common_exceptionThrown      @ exception raised, bail out
#endif
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_EXIT: /* 0x1e */
/* File: armv5te/OP_MONITOR_EXIT.S */
    /*
     * Unlock an object.
     *
     * Exceptions that occur when unlocking a monitor need to appear as
     * if they happened at the following instruction.  See the Dalvik
     * instruction spec.
     */
    /* monitor-exit vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    EXPORT_PC()                         @ before fetch: export the PC
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, exception is pending
    FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CHECK_CAST: /* 0x1f */
/* File: armv5te/OP_CHECK_CAST.S */
    /*
     * Check to see if a cast from one class to another is allowed.
     */
    /* check-cast vAA, class@BBBB */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r2, 1)                        @ r2<- BBBB
    GET_VREG(r9, r3)                    @ r9<- object
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
    cmp     r9, #0                      @ is object null?
    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
    beq     .LOP_CHECK_CAST_okay        @ null obj, cast always succeeds
    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_CHECK_CAST_resolve     @ not resolved, do it now
.LOP_CHECK_CAST_resolved:
    cmp     r0, r1                      @ same class (trivial success)?
    bne     .LOP_CHECK_CAST_fullcheck   @ no, do full check
.LOP_CHECK_CAST_okay:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INSTANCE_OF: /* 0x20 */
/* File: armv5te/OP_INSTANCE_OF.S */
    /*
     * Check to see if an object reference is an instance of a class.
     *
     * Most common situation is a non-null object, being compared against
     * an already-resolved class.
     */
    /* instance-of vA, vB, class@CCCC */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    and     r9, r9, #15                 @ r9<- A
    cmp     r0, #0                      @ is object null?
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
    beq     .LOP_INSTANCE_OF_store      @ null obj, not an instance, store r0
    FETCH(r3, 1)                        @ r3<- CCCC
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_INSTANCE_OF_resolve    @ not resolved, do it now
.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
    cmp     r0, r1                      @ same class (trivial success)?
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

/* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv6t2/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r2, rINST, #8, #4           @ r2<- A
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vA<- length (r2 holds A, not B)
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved: @ r0=class
    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

/* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

/* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


/* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /* fill-array-data vAA, +BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
    EXPORT_PC();                        @ (trailing ';' is a harmless empty stmt)
    bl      dvmInterpHandleFillArrayData @ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    mov     r9, r9, lsl #1              @ r9<- byte offset
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: arm-vfp/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_FLOAT_finish      @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: arm-vfp/OP_CMPG_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    flds    s0, [r2]                    @ s0<- vBB
    flds    s1, [r3]                    @ s1<- vCC
    fcmpes  s0, s1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_FLOAT_finish      @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: arm-vfp/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x > y) {
     *         return 1;
     *     } else if (x < y) {
     *         return -1;
     *     } else {
     *         return -1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mvn     r0, #0                      @ r0<- -1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    movgt   r0, #1                      @ (greater than) r0<- 1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPL_DOUBLE_finish     @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: arm-vfp/OP_CMPG_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * int compare(x, y) {
     *     if (x == y) {
     *         return 0;
     *     } else if (x < y) {
     *         return -1;
     *     } else if (x > y) {
     *         return 1;
     *     } else {
     *         return 1;
     *     }
     * }
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    fldd    d0, [r2]                    @ d0<- vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fcmped  d0, d1                      @ compare (vBB, vCC)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    mov     r0, #1                      @ r0<- 1 (default)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fmstat                              @ export status flags
    mvnmi   r0, #0                      @ (less than) r0<- -1
    moveq   r0, #0                      @ (equal) r0<- 0
    b       .LOP_CMPG_DOUBLE_finish     @ argh


/* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *  subs    ip, r0, r2
     *  sbcs    ip, r1, r3
     *  subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
 */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

/* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv6t2/OP_IF_EQ.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv6t2/OP_IF_NE.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: armv6t2/OP_IF_LT.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: armv6t2/OP_IF_GE.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: armv6t2/OP_IF_GT.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_LE: /* 0x37 */
/* File: armv6t2/OP_IF_LE.S */
/* File: armv6t2/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r0, rINST, #8, #4           @ r0<- A
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bgt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_EQZ: /* 0x38 */
/* File: armv5te/OP_IF_EQZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_NEZ: /* 0x39 */
/* File: armv5te/OP_IF_NEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_LTZ: /* 0x3a */
/* File: armv5te/OP_IF_LTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GEZ: /* 0x3b */
/* File: armv5te/OP_IF_GEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_GTZ: /* 0x3c */
/* File: armv5te/OP_IF_GTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif



/* ------------------------------ */
    .balign 64
.L_OP_IF_LEZ: /* 0x3d */
/* File: armv5te/OP_IF_LEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
1905 * 1906 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1907 */ 1908 /* if-cmp vAA, +BBBB */ 1909 mov r0, rINST, lsr #8 @ r0<- AA 1910 GET_VREG(r2, r0) @ r2<- vAA 1911 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1912 cmp r2, #0 @ compare (vA, 0) 1913 bgt 1f @ branch to 1 if comparison failed 1914 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1915 movs r9, r9, asl #1 @ convert to bytes, check sign 1916 bmi common_backwardBranch @ backward branch, do periodic checks 19171: 1918#if defined(WITH_JIT) 1919 GET_JIT_PROF_TABLE(r0) 1920 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1921 cmp r0,#0 1922 bne common_updateProfile 1923 GET_INST_OPCODE(ip) @ extract opcode from rINST 1924 GOTO_OPCODE(ip) @ jump to next instruction 1925#else 1926 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1927 GET_INST_OPCODE(ip) @ extract opcode from rINST 1928 GOTO_OPCODE(ip) @ jump to next instruction 1929#endif 1930 1931 1932 1933/* ------------------------------ */ 1934 .balign 64 1935.L_OP_UNUSED_3E: /* 0x3e */ 1936/* File: armv5te/OP_UNUSED_3E.S */ 1937/* File: armv5te/unused.S */ 1938 bl common_abort 1939 1940 1941 1942/* ------------------------------ */ 1943 .balign 64 1944.L_OP_UNUSED_3F: /* 0x3f */ 1945/* File: armv5te/OP_UNUSED_3F.S */ 1946/* File: armv5te/unused.S */ 1947 bl common_abort 1948 1949 1950 1951/* ------------------------------ */ 1952 .balign 64 1953.L_OP_UNUSED_40: /* 0x40 */ 1954/* File: armv5te/OP_UNUSED_40.S */ 1955/* File: armv5te/unused.S */ 1956 bl common_abort 1957 1958 1959 1960/* ------------------------------ */ 1961 .balign 64 1962.L_OP_UNUSED_41: /* 0x41 */ 1963/* File: armv5te/OP_UNUSED_41.S */ 1964/* File: armv5te/unused.S */ 1965 bl common_abort 1966 1967 1968 1969/* ------------------------------ */ 1970 .balign 64 1971.L_OP_UNUSED_42: /* 0x42 */ 1972/* File: armv5te/OP_UNUSED_42.S */ 1973/* File: armv5te/unused.S */ 1974 bl common_abort 1975 1976 1977 1978/* ------------------------------ */ 1979 .balign 64 
.L_OP_UNUSED_43: /* 0x43 */
/* File: armv5te/OP_UNUSED_43.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM


/* ------------------------------ */
    .balign 64
.L_OP_AGET: /* 0x44 */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    @ unsigned compare: a negative index wraps huge, so bcs also catches it
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: armv5te/OP_AGET_WIDE.S */
    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width (8 bytes)
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .LOP_AGET_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_AGET_OBJECT: /* 0x46 */
/* File: armv5te/OP_AGET_OBJECT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BOOLEAN: /* 0x47 */
/* File: armv5te/OP_AGET_BOOLEAN.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1 byte)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrb    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (zero-extend)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_BYTE: /* 0x48 */
/* File: armv5te/OP_AGET_BYTE.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1 byte)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (sign-extend)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_CHAR: /* 0x49 */
/* File: armv5te/OP_AGET_CHAR.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2 bytes)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrh    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (zero-extend)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_AGET_SHORT: /* 0x4a */
/* File: armv5te/OP_AGET_SHORT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2 bytes)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC] (sign-extend)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT: /* 0x4b */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width (4 bytes)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_WIDE: /* 0x4c */
/* File: armv5te/OP_APUT_WIDE.S */
    /*
     * Array put, 64 bits.  vBB[vCC] <- vAA.
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
     */
    /* aput-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width (8 bytes)
    cmp     r1, r3                      @ compare unsigned index, length
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    bcc     .LOP_APUT_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

/* ------------------------------ */
    .balign 64
.L_OP_APUT_OBJECT: /* 0x4d */
/* File: armv5te/OP_APUT_OBJECT.S */
    /*
     * Store an object into an array.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     */
    /* op vAA, vBB, vCC */
    @ NOTE: unlike OP_APUT, the array object lives in r1 and the index in r0
    @ here; the stored object ends up in r9 and the element addr in r10.
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r1, r2)                    @ r1<- vBB (array object)
    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
    cmp     r1, #0                      @ null array object?
    GET_VREG(r9, r9)                    @ r9<- vAA
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r1, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r10, r1, r0, lsl #2         @ r10<- arrayObj + index*width
    cmp     r0, r3                      @ compare unsigned index, length
    bcc     .LOP_APUT_OBJECT_finish     @ we're okay, continue on
    b       common_errArrayIndex        @ index >= length, bail


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BOOLEAN: /* 0x4e */
/* File: armv5te/OP_APUT_BOOLEAN.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1 byte)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_BYTE: /* 0x4f */
/* File: armv5te/OP_APUT_BYTE.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width (1 byte)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_CHAR: /* 0x50 */
/* File: armv5te/OP_APUT_CHAR.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2 bytes)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_APUT_SHORT: /* 0x51 */
/* File: armv5te/OP_APUT_SHORT.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width (2 bytes)
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET: /* 0x52 */
/* File: armv6t2/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: armv6t2/OP_IGET_WIDE.S */
    /*
     * 64-bit ("wide") instance field get.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_WIDE_finish
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: armv5te/OP_IGET_OBJECT.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_OBJECT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: armv5te/OP_IGET_BOOLEAN.S */
@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BOOLEAN_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_BYTE: /* 0x56 */
/* File: armv5te/OP_IGET_BYTE.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_BYTE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_CHAR: /* 0x57 */
/* File: armv5te/OP_IGET_CHAR.S */
@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_CHAR_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_SHORT: /* 0x58 */
/* File: armv5te/OP_IGET_SHORT.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0
    bne     .LOP_IGET_SHORT_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT: /* 0x59 */
/* File: armv6t2/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_finish            @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE: /* 0x5a */
/* File: armv6t2/OP_IPUT_WIDE.S */
    /* 64-bit ("wide") instance field put */
    /* iput-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_WIDE_finish       @ yes, finish up
    b       common_exceptionThrown

/* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT: /* 0x5b */
/* File: armv5te/OP_IPUT_OBJECT.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_OBJECT_finish     @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_BOOLEAN: /* 0x5c */
/* File: armv5te/OP_IPUT_BOOLEAN.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
2722 bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up 2723 b common_exceptionThrown 2724 2725 2726/* ------------------------------ */ 2727 .balign 64 2728.L_OP_IPUT_BYTE: /* 0x5d */ 2729/* File: armv5te/OP_IPUT_BYTE.S */ 2730@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" } 2731/* File: armv5te/OP_IPUT.S */ 2732 /* 2733 * General 32-bit instance field put. 2734 * 2735 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2736 */ 2737 /* op vA, vB, field@CCCC */ 2738 mov r0, rINST, lsr #12 @ r0<- B 2739 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2740 FETCH(r1, 1) @ r1<- field ref CCCC 2741 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2742 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2743 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2744 cmp r0, #0 @ is resolved entry null? 2745 bne .LOP_IPUT_BYTE_finish @ no, already resolved 27468: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2747 EXPORT_PC() @ resolve() could throw 2748 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2749 bl dvmResolveInstField @ r0<- resolved InstField ptr 2750 cmp r0, #0 @ success? 2751 bne .LOP_IPUT_BYTE_finish @ yes, finish up 2752 b common_exceptionThrown 2753 2754 2755/* ------------------------------ */ 2756 .balign 64 2757.L_OP_IPUT_CHAR: /* 0x5e */ 2758/* File: armv5te/OP_IPUT_CHAR.S */ 2759@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" } 2760/* File: armv5te/OP_IPUT.S */ 2761 /* 2762 * General 32-bit instance field put. 
2763 * 2764 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2765 */ 2766 /* op vA, vB, field@CCCC */ 2767 mov r0, rINST, lsr #12 @ r0<- B 2768 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2769 FETCH(r1, 1) @ r1<- field ref CCCC 2770 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2771 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2772 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2773 cmp r0, #0 @ is resolved entry null? 2774 bne .LOP_IPUT_CHAR_finish @ no, already resolved 27758: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2776 EXPORT_PC() @ resolve() could throw 2777 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2778 bl dvmResolveInstField @ r0<- resolved InstField ptr 2779 cmp r0, #0 @ success? 2780 bne .LOP_IPUT_CHAR_finish @ yes, finish up 2781 b common_exceptionThrown 2782 2783 2784/* ------------------------------ */ 2785 .balign 64 2786.L_OP_IPUT_SHORT: /* 0x5f */ 2787/* File: armv5te/OP_IPUT_SHORT.S */ 2788@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" } 2789/* File: armv5te/OP_IPUT.S */ 2790 /* 2791 * General 32-bit instance field put. 2792 * 2793 * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short 2794 */ 2795 /* op vA, vB, field@CCCC */ 2796 mov r0, rINST, lsr #12 @ r0<- B 2797 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2798 FETCH(r1, 1) @ r1<- field ref CCCC 2799 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2800 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2801 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2802 cmp r0, #0 @ is resolved entry null? 2803 bne .LOP_IPUT_SHORT_finish @ no, already resolved 28048: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2805 EXPORT_PC() @ resolve() could throw 2806 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2807 bl dvmResolveInstField @ r0<- resolved InstField ptr 2808 cmp r0, #0 @ success? 
    bne     .LOP_IPUT_SHORT_finish      @ yes, finish up
    b       common_exceptionThrown      @ resolve failed; exception pending

/* ------------------------------ */
    .balign 64
.L_OP_SGET: /* 0x60 */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     *
     * NOTE(review): on a cache miss this branches to the out-of-line
     * .LOP_SGET_resolve stub, emitted elsewhere in this file.
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_resolve           @ yes, do resolve
.LOP_SGET_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_WIDE: /* 0x61 */
/* File: armv5te/OP_SGET_WIDE.S */
    /*
     * 64-bit SGET handler.
     */
    /* sget-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_WIDE_resolve      @ yes, do resolve
.LOP_SGET_WIDE_finish:
    mov     r1, rINST, lsr #8           @ r1<- AA
    ldrd    r2, [r0, #offStaticField_value] @ r2/r3<- field value (aligned)
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    stmia   r1, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_OBJECT: /* 0x62 */
/* File: armv5te/OP_SGET_OBJECT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_OBJECT_resolve    @ yes, do resolve
.LOP_SGET_OBJECT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_BOOLEAN: /* 0x63 */
/* File: armv5te/OP_SGET_BOOLEAN.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BOOLEAN_resolve   @ yes, do resolve
.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_BYTE: /* 0x64 */
/* File: armv5te/OP_SGET_BYTE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_BYTE_resolve      @ yes, do resolve
.LOP_SGET_BYTE_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_CHAR: /* 0x65 */
/* File: armv5te/OP_SGET_CHAR.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_CHAR_resolve      @ yes, do resolve
.LOP_SGET_CHAR_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SGET_SHORT: /* 0x66 */
/* File: armv5te/OP_SGET_SHORT.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_SHORT_resolve     @ yes, do resolve
.LOP_SGET_SHORT_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT: /* 0x67 */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     *
     * NOTE(review): on a cache miss this branches to the out-of-line
     * .LOP_SPUT_resolve stub, emitted elsewhere in this file.
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_resolve           @ yes, do resolve
.LOP_SPUT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_WIDE: /* 0x68 */
/* File: armv5te/OP_SPUT_WIDE.S */
    /*
     * 64-bit SPUT handler.
     */
    /* sput-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_WIDE_resolve      @ yes, do resolve
.LOP_SPUT_WIDE_finish: @ field ptr in r0, AA in r9
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r2-r3}                 @ r2/r3<- vAA/vAA+1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strd    r2, [r0, #offStaticField_value] @ field<- vAA/vAA+1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_OBJECT: /* 0x69 */
/* File: armv5te/OP_SPUT_OBJECT.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_OBJECT_resolve    @ yes, do resolve
.LOP_SPUT_OBJECT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BOOLEAN: /* 0x6a */
/* File: armv5te/OP_SPUT_BOOLEAN.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BOOLEAN_resolve   @ yes, do resolve
.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_BYTE: /* 0x6b */
/* File: armv5te/OP_SPUT_BYTE.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_BYTE_resolve      @ yes, do resolve
.LOP_SPUT_BYTE_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_CHAR: /* 0x6c */
/* File: armv5te/OP_SPUT_CHAR.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_CHAR_resolve      @ yes, do resolve
.LOP_SPUT_CHAR_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_SPUT_SHORT: /* 0x6d */
/* File: armv5te/OP_SPUT_SHORT.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_SHORT_resolve     @ yes, do resolve
.LOP_SPUT_SHORT_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL: /* 0x6e */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    @ ".if (!0)" is always true: this is the non-range expansion of the
    @ template (the /range variant is generated with "(!1)" and skips it)
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_continue  @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_continue  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER: /* 0x6f */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_continue  @ resolved, continue on
    b       .LOP_INVOKE_SUPER_resolve   @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT: /* 0x70 */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoiding loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_resolve  @ not resolved, do it now
.LOP_INVOKE_DIRECT_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodNoRange  @ no, continue on
    b       common_errNullObject        @ yes, throw exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC: /* 0x71 */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodNoRange  @ yes, continue on
0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodNoRange  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE: /* 0x72 */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!0)
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodNoRange  @ jump to common handler

/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_73: /* 0x73 */
/* File: armv5te/OP_UNUSED_73.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL.S */
    /*
     * Handle a virtual method call.
     *
     * for: invoke-virtual, invoke-virtual/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    @ ".if (!1)" is always false: this is the /range expansion of the
    @ template, so r10 keeps the full CCCC register index
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue  @ yes, continue on
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_VIRTUAL         @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     .LOP_INVOKE_VIRTUAL_RANGE_continue  @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER.S */
    /*
     * Handle a "super" method call.
     *
     * for: invoke-super, invoke-super/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved baseMethod
    cmp     r2, #0                      @ null "this"?
    ldr     r9, [rGLUE, #offGlue_method] @ r9<- current method
    beq     common_errNullObject        @ null "this", throw exception
    cmp     r0, #0                      @ already resolved?
    ldr     r9, [r9, #offMethod_clazz]  @ r9<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    bne     .LOP_INVOKE_SUPER_RANGE_continue  @ resolved, continue on
    b       .LOP_INVOKE_SUPER_RANGE_resolve   @ do resolve now

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
/* File: armv5te/OP_INVOKE_DIRECT.S */
    /*
     * Handle a direct method call.
     *
     * (We could defer the "is 'this' pointer null" test to the common
     * method invocation code, and use a flag to indicate that static
     * calls don't count.  If we do this as part of copying the arguments
     * out we could avoiding loading the first arg twice.)
     *
     * for: invoke-direct, invoke-direct/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    .if     (!1)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r2, r10)                   @ r2<- "this" ptr
    beq     .LOP_INVOKE_DIRECT_RANGE_resolve  @ not resolved, do it now
.LOP_INVOKE_DIRECT_RANGE_finish:
    cmp     r2, #0                      @ null "this" ref?
    bne     common_invokeMethodRange    @ no, continue on
    b       common_errNullObject        @ yes, throw exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
/* File: armv5te/OP_INVOKE_STATIC.S */
    /*
     * Handle a static method call.
     *
     * for: invoke-static, invoke-static/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResMethods]    @ r3<- pDvmDex->pResMethods
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved methodToCall
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ must export for invoke
    bne     common_invokeMethodRange    @ yes, continue on
0:  ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    mov     r2, #METHOD_STATIC          @ resolver method type
    bl      dvmResolveMethod            @ r0<- call(clazz, ref, flags)
    cmp     r0, #0                      @ got null?
    bne     common_invokeMethodRange    @ no, continue
    b       common_exceptionThrown      @ yes, handle exception

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
/* File: armv5te/OP_INVOKE_INTERFACE.S */
    /*
     * Handle an interface method call.
     *
     * for: invoke-interface, invoke-interface/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r2, 2)                        @ r2<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB
    .if     (!1)
    and     r2, r2, #15                 @ r2<- C (or stays CCCC)
    .endif
    EXPORT_PC()                         @ must export for invoke
    GET_VREG(r0, r2)                    @ r0<- first arg ("this")
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- methodClassDex
    cmp     r0, #0                      @ null obj?
    ldr     r2, [rGLUE, #offGlue_method]  @ r2<- method
    beq     common_errNullObject        @ yes, fail
    ldr     r0, [r0, #offObject_clazz]  @ r0<- thisPtr->clazz
    bl      dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
    cmp     r0, #0                      @ failed?
    beq     common_exceptionThrown      @ yes, handle exception
    b       common_invokeMethodRange    @ jump to common handler

/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_79: /* 0x79 */
/* File: armv5te/OP_UNUSED_79.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM

/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_7A: /* 0x7a */
/* File: armv5te/OP_UNUSED_7A.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort the VM

/* ------------------------------ */
    .balign 64
.L_OP_NEG_INT: /* 0x7b */
/* File: armv6t2/OP_NEG_INT.S */
/* File: armv6t2/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsb     r0, r0, #0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 8-9 instructions */

/* ------------------------------ */
    .balign 64
.L_OP_NOT_INT: /* 0x7c */
/* File: armv6t2/OP_NOT_INT.S */
/* File: armv6t2/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 8-9 instructions */

/* ------------------------------ */
    .balign 64
.L_OP_NEG_LONG: /* 0x7d */
/* File: armv6t2/OP_NEG_LONG.S */
/* File: armv6t2/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    rsbs    r0, r0, #0                  @ optional op; may set condition codes
    rsc     r1, r1, #0                  @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */

/* ------------------------------ */
    .balign 64
.L_OP_NOT_LONG: /* 0x7e */
/* File: armv6t2/OP_NOT_LONG.S */
/* File: armv6t2/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mvn     r0, r0                      @ optional op; may set condition codes
    mvn     r1, r1                      @ r0/r1<- op, r2-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */

/* ------------------------------ */
    .balign 64
.L_OP_NEG_FLOAT: /* 0x7f */
/* File: armv6t2/OP_NEG_FLOAT.S */
/* File: armv6t2/unop.S */
    /*
     * Generic 32-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0".
     * This could be an ARM instruction or a function call.
     *
     * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
     *      int-to-byte, int-to-char, int-to-short
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    add     r0, r0, #0x80000000         @ r0<- op, r0-r3 changed (flip sign bit)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 8-9 instructions */

/* ------------------------------ */
    .balign 64
.L_OP_NEG_DOUBLE: /* 0x80 */
/* File: armv6t2/OP_NEG_DOUBLE.S */
/* File: armv6t2/unopWide.S */
    /*
     * Generic 64-bit unary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = op r0/r1".
     * This could be an ARM instruction or a function call.
     *
     * For: neg-long, not-long, neg-double, long-to-double, double-to-long
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
                                        @ optional op; may set condition codes
    add     r1, r1, #0x80000000         @ r0/r1<- op, r2-r3 changed (flip sign of high word)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-11 instructions */

/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_LONG: /* 0x81 */
/* File: armv6t2/OP_INT_TO_LONG.S */
/* File: armv6t2/unopWider.S */
    /*
     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = op r0", where
     * "result" is a 64-bit quantity in r0/r1.
     *
     * For: int-to-long, int-to-double, float-to-long, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r3)                    @ r0<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
                                        @ optional op; may set condition codes
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r0, asr #31             @ r0<- op, r0-r3 changed (sign-extend)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 9-10 instructions */

/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_FLOAT: /* 0x82 */
/* File: arm-vfp/OP_INT_TO_FLOAT.S */
/* File: arm-vfp/funop.S */
    /*
     * Generic 32-bit unary floating-point operation.  Provide an "instr"
     * line that specifies an instruction that performs "s1 = op s0".
     *
     * for: int-to-float, float-to-int
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fsitos  s1, s0                      @ s1<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fsts    s1, [r9]                    @ vA<- s1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INT_TO_DOUBLE: /* 0x83 */
/* File: arm-vfp/OP_INT_TO_DOUBLE.S */
/* File: arm-vfp/funopWider.S */
    /*
     * Generic 32bit-to-64bit floating point unary operation.  Provide an
     * "instr" line that specifies an instruction that performs "d0 = op s0".
     *
     * For: int-to-double, float-to-double
     */
    /* unop vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    flds    s0, [r3]                    @ s0<- vB
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    and     r9, r9, #15                 @ r9<- A
    fsitod  d0, s0                      @ d0<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    fstd    d0, [r9]                    @ vA<- d0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_LONG_TO_INT: /* 0x84 */
/* File: armv5te/OP_LONG_TO_INT.S */
/* we ignore the high word, making this equivalent to a 32-bit reg move */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15                 @ r0<- A (strip high nibble)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction
execute next instruction 3754 3755 3756 3757/* ------------------------------ */ 3758 .balign 64 3759.L_OP_LONG_TO_FLOAT: /* 0x85 */ 3760/* File: armv6t2/OP_LONG_TO_FLOAT.S */ 3761/* File: armv6t2/unopNarrower.S */ 3762 /* 3763 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3764 * that specifies an instruction that performs "result = op r0/r1", where 3765 * "result" is a 32-bit quantity in r0. 3766 * 3767 * For: long-to-float, double-to-int, double-to-float 3768 * 3769 * (This would work for long-to-int, but that instruction is actually 3770 * an exact match for OP_MOVE.) 3771 */ 3772 /* unop vA, vB */ 3773 mov r3, rINST, lsr #12 @ r3<- B 3774 ubfx r9, rINST, #8, #4 @ r9<- A 3775 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3776 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3777 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3778 @ optional op; may set condition codes 3779 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3780 GET_INST_OPCODE(ip) @ extract opcode from rINST 3781 SET_VREG(r0, r9) @ vA<- r0 3782 GOTO_OPCODE(ip) @ jump to next instruction 3783 /* 9-10 instructions */ 3784 3785 3786/* ------------------------------ */ 3787 .balign 64 3788.L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3789/* File: armv6t2/OP_LONG_TO_DOUBLE.S */ 3790/* File: armv6t2/unopWide.S */ 3791 /* 3792 * Generic 64-bit unary operation. Provide an "instr" line that 3793 * specifies an instruction that performs "result = op r0/r1". 3794 * This could be an ARM instruction or a function call. 
3795 * 3796 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3797 */ 3798 /* unop vA, vB */ 3799 mov r3, rINST, lsr #12 @ r3<- B 3800 ubfx r9, rINST, #8, #4 @ r9<- A 3801 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3802 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3803 ldmia r3, {r0-r1} @ r0/r1<- vAA 3804 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3805 @ optional op; may set condition codes 3806 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3807 GET_INST_OPCODE(ip) @ extract opcode from rINST 3808 stmia r9, {r0-r1} @ vAA<- r0/r1 3809 GOTO_OPCODE(ip) @ jump to next instruction 3810 /* 10-11 instructions */ 3811 3812 3813 3814/* ------------------------------ */ 3815 .balign 64 3816.L_OP_FLOAT_TO_INT: /* 0x87 */ 3817/* File: arm-vfp/OP_FLOAT_TO_INT.S */ 3818/* File: arm-vfp/funop.S */ 3819 /* 3820 * Generic 32-bit unary floating-point operation. Provide an "instr" 3821 * line that specifies an instruction that performs "s1 = op s0". 3822 * 3823 * for: int-to-float, float-to-int 3824 */ 3825 /* unop vA, vB */ 3826 mov r3, rINST, lsr #12 @ r3<- B 3827 mov r9, rINST, lsr #8 @ r9<- A+ 3828 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3829 flds s0, [r3] @ s0<- vB 3830 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3831 and r9, r9, #15 @ r9<- A 3832 ftosizs s1, s0 @ s1<- op 3833 GET_INST_OPCODE(ip) @ extract opcode from rINST 3834 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3835 fsts s1, [r9] @ vA<- s1 3836 GOTO_OPCODE(ip) @ jump to next instruction 3837 3838 3839/* ------------------------------ */ 3840 .balign 64 3841.L_OP_FLOAT_TO_LONG: /* 0x88 */ 3842/* File: armv6t2/OP_FLOAT_TO_LONG.S */ 3843@include "armv6t2/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3844/* File: armv6t2/unopWider.S */ 3845 /* 3846 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3847 * that specifies an instruction that performs "result = op r0", where 3848 * "result" is a 64-bit quantity in r0/r1. 
3849 * 3850 * For: int-to-long, int-to-double, float-to-long, float-to-double 3851 */ 3852 /* unop vA, vB */ 3853 mov r3, rINST, lsr #12 @ r3<- B 3854 ubfx r9, rINST, #8, #4 @ r9<- A 3855 GET_VREG(r0, r3) @ r0<- vB 3856 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3857 @ optional op; may set condition codes 3858 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3859 bl f2l_doconv @ r0<- op, r0-r3 changed 3860 GET_INST_OPCODE(ip) @ extract opcode from rINST 3861 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3862 GOTO_OPCODE(ip) @ jump to next instruction 3863 /* 9-10 instructions */ 3864 3865 3866 3867/* ------------------------------ */ 3868 .balign 64 3869.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3870/* File: arm-vfp/OP_FLOAT_TO_DOUBLE.S */ 3871/* File: arm-vfp/funopWider.S */ 3872 /* 3873 * Generic 32bit-to-64bit floating point unary operation. Provide an 3874 * "instr" line that specifies an instruction that performs "d0 = op s0". 3875 * 3876 * For: int-to-double, float-to-double 3877 */ 3878 /* unop vA, vB */ 3879 mov r3, rINST, lsr #12 @ r3<- B 3880 mov r9, rINST, lsr #8 @ r9<- A+ 3881 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3882 flds s0, [r3] @ s0<- vB 3883 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3884 and r9, r9, #15 @ r9<- A 3885 fcvtds d0, s0 @ d0<- op 3886 GET_INST_OPCODE(ip) @ extract opcode from rINST 3887 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3888 fstd d0, [r9] @ vA<- d0 3889 GOTO_OPCODE(ip) @ jump to next instruction 3890 3891 3892/* ------------------------------ */ 3893 .balign 64 3894.L_OP_DOUBLE_TO_INT: /* 0x8a */ 3895/* File: arm-vfp/OP_DOUBLE_TO_INT.S */ 3896/* File: arm-vfp/funopNarrower.S */ 3897 /* 3898 * Generic 64bit-to-32bit unary floating point operation. Provide an 3899 * "instr" line that specifies an instruction that performs "s0 = op d0". 
3900 * 3901 * For: double-to-int, double-to-float 3902 */ 3903 /* unop vA, vB */ 3904 mov r3, rINST, lsr #12 @ r3<- B 3905 mov r9, rINST, lsr #8 @ r9<- A+ 3906 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3907 fldd d0, [r3] @ d0<- vB 3908 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3909 and r9, r9, #15 @ r9<- A 3910 ftosizd s0, d0 @ s0<- op 3911 GET_INST_OPCODE(ip) @ extract opcode from rINST 3912 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3913 fsts s0, [r9] @ vA<- s0 3914 GOTO_OPCODE(ip) @ jump to next instruction 3915 3916 3917/* ------------------------------ */ 3918 .balign 64 3919.L_OP_DOUBLE_TO_LONG: /* 0x8b */ 3920/* File: armv6t2/OP_DOUBLE_TO_LONG.S */ 3921@include "armv6t2/unopWide.S" {"instr":"bl __aeabi_d2lz"} 3922/* File: armv6t2/unopWide.S */ 3923 /* 3924 * Generic 64-bit unary operation. Provide an "instr" line that 3925 * specifies an instruction that performs "result = op r0/r1". 3926 * This could be an ARM instruction or a function call. 3927 * 3928 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3929 */ 3930 /* unop vA, vB */ 3931 mov r3, rINST, lsr #12 @ r3<- B 3932 ubfx r9, rINST, #8, #4 @ r9<- A 3933 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3934 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3935 ldmia r3, {r0-r1} @ r0/r1<- vAA 3936 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3937 @ optional op; may set condition codes 3938 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 3939 GET_INST_OPCODE(ip) @ extract opcode from rINST 3940 stmia r9, {r0-r1} @ vAA<- r0/r1 3941 GOTO_OPCODE(ip) @ jump to next instruction 3942 /* 10-11 instructions */ 3943 3944 3945 3946 3947/* ------------------------------ */ 3948 .balign 64 3949.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 3950/* File: arm-vfp/OP_DOUBLE_TO_FLOAT.S */ 3951/* File: arm-vfp/funopNarrower.S */ 3952 /* 3953 * Generic 64bit-to-32bit unary floating point operation. Provide an 3954 * "instr" line that specifies an instruction that performs "s0 = op d0". 
3955 * 3956 * For: double-to-int, double-to-float 3957 */ 3958 /* unop vA, vB */ 3959 mov r3, rINST, lsr #12 @ r3<- B 3960 mov r9, rINST, lsr #8 @ r9<- A+ 3961 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 3962 fldd d0, [r3] @ d0<- vB 3963 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3964 and r9, r9, #15 @ r9<- A 3965 fcvtsd s0, d0 @ s0<- op 3966 GET_INST_OPCODE(ip) @ extract opcode from rINST 3967 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 3968 fsts s0, [r9] @ vA<- s0 3969 GOTO_OPCODE(ip) @ jump to next instruction 3970 3971 3972/* ------------------------------ */ 3973 .balign 64 3974.L_OP_INT_TO_BYTE: /* 0x8d */ 3975/* File: armv6t2/OP_INT_TO_BYTE.S */ 3976/* File: armv6t2/unop.S */ 3977 /* 3978 * Generic 32-bit unary operation. Provide an "instr" line that 3979 * specifies an instruction that performs "result = op r0". 3980 * This could be an ARM instruction or a function call. 3981 * 3982 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3983 * int-to-byte, int-to-char, int-to-short 3984 */ 3985 /* unop vA, vB */ 3986 mov r3, rINST, lsr #12 @ r3<- B 3987 ubfx r9, rINST, #8, #4 @ r9<- A 3988 GET_VREG(r0, r3) @ r0<- vB 3989 @ optional op; may set condition codes 3990 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3991 sxtb r0, r0 @ r0<- op, r0-r3 changed 3992 GET_INST_OPCODE(ip) @ extract opcode from rINST 3993 SET_VREG(r0, r9) @ vAA<- r0 3994 GOTO_OPCODE(ip) @ jump to next instruction 3995 /* 8-9 instructions */ 3996 3997 3998/* ------------------------------ */ 3999 .balign 64 4000.L_OP_INT_TO_CHAR: /* 0x8e */ 4001/* File: armv6t2/OP_INT_TO_CHAR.S */ 4002/* File: armv6t2/unop.S */ 4003 /* 4004 * Generic 32-bit unary operation. Provide an "instr" line that 4005 * specifies an instruction that performs "result = op r0". 4006 * This could be an ARM instruction or a function call. 
4007 * 4008 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4009 * int-to-byte, int-to-char, int-to-short 4010 */ 4011 /* unop vA, vB */ 4012 mov r3, rINST, lsr #12 @ r3<- B 4013 ubfx r9, rINST, #8, #4 @ r9<- A 4014 GET_VREG(r0, r3) @ r0<- vB 4015 @ optional op; may set condition codes 4016 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4017 uxth r0, r0 @ r0<- op, r0-r3 changed 4018 GET_INST_OPCODE(ip) @ extract opcode from rINST 4019 SET_VREG(r0, r9) @ vAA<- r0 4020 GOTO_OPCODE(ip) @ jump to next instruction 4021 /* 8-9 instructions */ 4022 4023 4024/* ------------------------------ */ 4025 .balign 64 4026.L_OP_INT_TO_SHORT: /* 0x8f */ 4027/* File: armv6t2/OP_INT_TO_SHORT.S */ 4028/* File: armv6t2/unop.S */ 4029 /* 4030 * Generic 32-bit unary operation. Provide an "instr" line that 4031 * specifies an instruction that performs "result = op r0". 4032 * This could be an ARM instruction or a function call. 4033 * 4034 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4035 * int-to-byte, int-to-char, int-to-short 4036 */ 4037 /* unop vA, vB */ 4038 mov r3, rINST, lsr #12 @ r3<- B 4039 ubfx r9, rINST, #8, #4 @ r9<- A 4040 GET_VREG(r0, r3) @ r0<- vB 4041 @ optional op; may set condition codes 4042 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4043 sxth r0, r0 @ r0<- op, r0-r3 changed 4044 GET_INST_OPCODE(ip) @ extract opcode from rINST 4045 SET_VREG(r0, r9) @ vAA<- r0 4046 GOTO_OPCODE(ip) @ jump to next instruction 4047 /* 8-9 instructions */ 4048 4049 4050/* ------------------------------ */ 4051 .balign 64 4052.L_OP_ADD_INT: /* 0x90 */ 4053/* File: armv5te/OP_ADD_INT.S */ 4054/* File: armv5te/binop.S */ 4055 /* 4056 * Generic 32-bit binary operation. Provide an "instr" line that 4057 * specifies an instruction that performs "result = r0 op r1". 4058 * This could be an ARM instruction or a function call. (If the result 4059 * comes back in a register other than r0, you can override "result".) 
4060 * 4061 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4062 * vCC (r1). Useful for integer division and modulus. Note that we 4063 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4064 * handles it correctly. 4065 * 4066 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4067 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4068 * mul-float, div-float, rem-float 4069 */ 4070 /* binop vAA, vBB, vCC */ 4071 FETCH(r0, 1) @ r0<- CCBB 4072 mov r9, rINST, lsr #8 @ r9<- AA 4073 mov r3, r0, lsr #8 @ r3<- CC 4074 and r2, r0, #255 @ r2<- BB 4075 GET_VREG(r1, r3) @ r1<- vCC 4076 GET_VREG(r0, r2) @ r0<- vBB 4077 .if 0 4078 cmp r1, #0 @ is second operand zero? 4079 beq common_errDivideByZero 4080 .endif 4081 4082 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4083 @ optional op; may set condition codes 4084 add r0, r0, r1 @ r0<- op, r0-r3 changed 4085 GET_INST_OPCODE(ip) @ extract opcode from rINST 4086 SET_VREG(r0, r9) @ vAA<- r0 4087 GOTO_OPCODE(ip) @ jump to next instruction 4088 /* 11-14 instructions */ 4089 4090 4091 4092/* ------------------------------ */ 4093 .balign 64 4094.L_OP_SUB_INT: /* 0x91 */ 4095/* File: armv5te/OP_SUB_INT.S */ 4096/* File: armv5te/binop.S */ 4097 /* 4098 * Generic 32-bit binary operation. Provide an "instr" line that 4099 * specifies an instruction that performs "result = r0 op r1". 4100 * This could be an ARM instruction or a function call. (If the result 4101 * comes back in a register other than r0, you can override "result".) 4102 * 4103 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4104 * vCC (r1). Useful for integer division and modulus. Note that we 4105 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4106 * handles it correctly. 
4107 * 4108 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4109 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4110 * mul-float, div-float, rem-float 4111 */ 4112 /* binop vAA, vBB, vCC */ 4113 FETCH(r0, 1) @ r0<- CCBB 4114 mov r9, rINST, lsr #8 @ r9<- AA 4115 mov r3, r0, lsr #8 @ r3<- CC 4116 and r2, r0, #255 @ r2<- BB 4117 GET_VREG(r1, r3) @ r1<- vCC 4118 GET_VREG(r0, r2) @ r0<- vBB 4119 .if 0 4120 cmp r1, #0 @ is second operand zero? 4121 beq common_errDivideByZero 4122 .endif 4123 4124 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4125 @ optional op; may set condition codes 4126 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4127 GET_INST_OPCODE(ip) @ extract opcode from rINST 4128 SET_VREG(r0, r9) @ vAA<- r0 4129 GOTO_OPCODE(ip) @ jump to next instruction 4130 /* 11-14 instructions */ 4131 4132 4133 4134/* ------------------------------ */ 4135 .balign 64 4136.L_OP_MUL_INT: /* 0x92 */ 4137/* File: armv5te/OP_MUL_INT.S */ 4138/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4139/* File: armv5te/binop.S */ 4140 /* 4141 * Generic 32-bit binary operation. Provide an "instr" line that 4142 * specifies an instruction that performs "result = r0 op r1". 4143 * This could be an ARM instruction or a function call. (If the result 4144 * comes back in a register other than r0, you can override "result".) 4145 * 4146 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4147 * vCC (r1). Useful for integer division and modulus. Note that we 4148 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4149 * handles it correctly. 
4150 * 4151 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4152 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4153 * mul-float, div-float, rem-float 4154 */ 4155 /* binop vAA, vBB, vCC */ 4156 FETCH(r0, 1) @ r0<- CCBB 4157 mov r9, rINST, lsr #8 @ r9<- AA 4158 mov r3, r0, lsr #8 @ r3<- CC 4159 and r2, r0, #255 @ r2<- BB 4160 GET_VREG(r1, r3) @ r1<- vCC 4161 GET_VREG(r0, r2) @ r0<- vBB 4162 .if 0 4163 cmp r1, #0 @ is second operand zero? 4164 beq common_errDivideByZero 4165 .endif 4166 4167 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4168 @ optional op; may set condition codes 4169 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4170 GET_INST_OPCODE(ip) @ extract opcode from rINST 4171 SET_VREG(r0, r9) @ vAA<- r0 4172 GOTO_OPCODE(ip) @ jump to next instruction 4173 /* 11-14 instructions */ 4174 4175 4176 4177/* ------------------------------ */ 4178 .balign 64 4179.L_OP_DIV_INT: /* 0x93 */ 4180/* File: armv5te/OP_DIV_INT.S */ 4181/* File: armv5te/binop.S */ 4182 /* 4183 * Generic 32-bit binary operation. Provide an "instr" line that 4184 * specifies an instruction that performs "result = r0 op r1". 4185 * This could be an ARM instruction or a function call. (If the result 4186 * comes back in a register other than r0, you can override "result".) 4187 * 4188 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4189 * vCC (r1). Useful for integer division and modulus. Note that we 4190 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4191 * handles it correctly. 
4192 * 4193 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4194 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4195 * mul-float, div-float, rem-float 4196 */ 4197 /* binop vAA, vBB, vCC */ 4198 FETCH(r0, 1) @ r0<- CCBB 4199 mov r9, rINST, lsr #8 @ r9<- AA 4200 mov r3, r0, lsr #8 @ r3<- CC 4201 and r2, r0, #255 @ r2<- BB 4202 GET_VREG(r1, r3) @ r1<- vCC 4203 GET_VREG(r0, r2) @ r0<- vBB 4204 .if 1 4205 cmp r1, #0 @ is second operand zero? 4206 beq common_errDivideByZero 4207 .endif 4208 4209 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4210 @ optional op; may set condition codes 4211 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4212 GET_INST_OPCODE(ip) @ extract opcode from rINST 4213 SET_VREG(r0, r9) @ vAA<- r0 4214 GOTO_OPCODE(ip) @ jump to next instruction 4215 /* 11-14 instructions */ 4216 4217 4218 4219/* ------------------------------ */ 4220 .balign 64 4221.L_OP_REM_INT: /* 0x94 */ 4222/* File: armv5te/OP_REM_INT.S */ 4223/* idivmod returns quotient in r0 and remainder in r1 */ 4224/* File: armv5te/binop.S */ 4225 /* 4226 * Generic 32-bit binary operation. Provide an "instr" line that 4227 * specifies an instruction that performs "result = r0 op r1". 4228 * This could be an ARM instruction or a function call. (If the result 4229 * comes back in a register other than r0, you can override "result".) 4230 * 4231 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4232 * vCC (r1). Useful for integer division and modulus. Note that we 4233 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4234 * handles it correctly. 
4235 * 4236 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4237 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4238 * mul-float, div-float, rem-float 4239 */ 4240 /* binop vAA, vBB, vCC */ 4241 FETCH(r0, 1) @ r0<- CCBB 4242 mov r9, rINST, lsr #8 @ r9<- AA 4243 mov r3, r0, lsr #8 @ r3<- CC 4244 and r2, r0, #255 @ r2<- BB 4245 GET_VREG(r1, r3) @ r1<- vCC 4246 GET_VREG(r0, r2) @ r0<- vBB 4247 .if 1 4248 cmp r1, #0 @ is second operand zero? 4249 beq common_errDivideByZero 4250 .endif 4251 4252 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4253 @ optional op; may set condition codes 4254 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4255 GET_INST_OPCODE(ip) @ extract opcode from rINST 4256 SET_VREG(r1, r9) @ vAA<- r1 4257 GOTO_OPCODE(ip) @ jump to next instruction 4258 /* 11-14 instructions */ 4259 4260 4261 4262/* ------------------------------ */ 4263 .balign 64 4264.L_OP_AND_INT: /* 0x95 */ 4265/* File: armv5te/OP_AND_INT.S */ 4266/* File: armv5te/binop.S */ 4267 /* 4268 * Generic 32-bit binary operation. Provide an "instr" line that 4269 * specifies an instruction that performs "result = r0 op r1". 4270 * This could be an ARM instruction or a function call. (If the result 4271 * comes back in a register other than r0, you can override "result".) 4272 * 4273 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4274 * vCC (r1). Useful for integer division and modulus. Note that we 4275 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4276 * handles it correctly. 
4277 * 4278 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4279 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4280 * mul-float, div-float, rem-float 4281 */ 4282 /* binop vAA, vBB, vCC */ 4283 FETCH(r0, 1) @ r0<- CCBB 4284 mov r9, rINST, lsr #8 @ r9<- AA 4285 mov r3, r0, lsr #8 @ r3<- CC 4286 and r2, r0, #255 @ r2<- BB 4287 GET_VREG(r1, r3) @ r1<- vCC 4288 GET_VREG(r0, r2) @ r0<- vBB 4289 .if 0 4290 cmp r1, #0 @ is second operand zero? 4291 beq common_errDivideByZero 4292 .endif 4293 4294 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4295 @ optional op; may set condition codes 4296 and r0, r0, r1 @ r0<- op, r0-r3 changed 4297 GET_INST_OPCODE(ip) @ extract opcode from rINST 4298 SET_VREG(r0, r9) @ vAA<- r0 4299 GOTO_OPCODE(ip) @ jump to next instruction 4300 /* 11-14 instructions */ 4301 4302 4303 4304/* ------------------------------ */ 4305 .balign 64 4306.L_OP_OR_INT: /* 0x96 */ 4307/* File: armv5te/OP_OR_INT.S */ 4308/* File: armv5te/binop.S */ 4309 /* 4310 * Generic 32-bit binary operation. Provide an "instr" line that 4311 * specifies an instruction that performs "result = r0 op r1". 4312 * This could be an ARM instruction or a function call. (If the result 4313 * comes back in a register other than r0, you can override "result".) 4314 * 4315 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4316 * vCC (r1). Useful for integer division and modulus. Note that we 4317 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4318 * handles it correctly. 
4319 * 4320 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4321 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4322 * mul-float, div-float, rem-float 4323 */ 4324 /* binop vAA, vBB, vCC */ 4325 FETCH(r0, 1) @ r0<- CCBB 4326 mov r9, rINST, lsr #8 @ r9<- AA 4327 mov r3, r0, lsr #8 @ r3<- CC 4328 and r2, r0, #255 @ r2<- BB 4329 GET_VREG(r1, r3) @ r1<- vCC 4330 GET_VREG(r0, r2) @ r0<- vBB 4331 .if 0 4332 cmp r1, #0 @ is second operand zero? 4333 beq common_errDivideByZero 4334 .endif 4335 4336 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4337 @ optional op; may set condition codes 4338 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4339 GET_INST_OPCODE(ip) @ extract opcode from rINST 4340 SET_VREG(r0, r9) @ vAA<- r0 4341 GOTO_OPCODE(ip) @ jump to next instruction 4342 /* 11-14 instructions */ 4343 4344 4345 4346/* ------------------------------ */ 4347 .balign 64 4348.L_OP_XOR_INT: /* 0x97 */ 4349/* File: armv5te/OP_XOR_INT.S */ 4350/* File: armv5te/binop.S */ 4351 /* 4352 * Generic 32-bit binary operation. Provide an "instr" line that 4353 * specifies an instruction that performs "result = r0 op r1". 4354 * This could be an ARM instruction or a function call. (If the result 4355 * comes back in a register other than r0, you can override "result".) 4356 * 4357 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4358 * vCC (r1). Useful for integer division and modulus. Note that we 4359 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4360 * handles it correctly. 
4361 * 4362 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4363 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4364 * mul-float, div-float, rem-float 4365 */ 4366 /* binop vAA, vBB, vCC */ 4367 FETCH(r0, 1) @ r0<- CCBB 4368 mov r9, rINST, lsr #8 @ r9<- AA 4369 mov r3, r0, lsr #8 @ r3<- CC 4370 and r2, r0, #255 @ r2<- BB 4371 GET_VREG(r1, r3) @ r1<- vCC 4372 GET_VREG(r0, r2) @ r0<- vBB 4373 .if 0 4374 cmp r1, #0 @ is second operand zero? 4375 beq common_errDivideByZero 4376 .endif 4377 4378 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4379 @ optional op; may set condition codes 4380 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4381 GET_INST_OPCODE(ip) @ extract opcode from rINST 4382 SET_VREG(r0, r9) @ vAA<- r0 4383 GOTO_OPCODE(ip) @ jump to next instruction 4384 /* 11-14 instructions */ 4385 4386 4387 4388/* ------------------------------ */ 4389 .balign 64 4390.L_OP_SHL_INT: /* 0x98 */ 4391/* File: armv5te/OP_SHL_INT.S */ 4392/* File: armv5te/binop.S */ 4393 /* 4394 * Generic 32-bit binary operation. Provide an "instr" line that 4395 * specifies an instruction that performs "result = r0 op r1". 4396 * This could be an ARM instruction or a function call. (If the result 4397 * comes back in a register other than r0, you can override "result".) 4398 * 4399 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4400 * vCC (r1). Useful for integer division and modulus. Note that we 4401 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4402 * handles it correctly. 
4403 * 4404 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4405 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4406 * mul-float, div-float, rem-float 4407 */ 4408 /* binop vAA, vBB, vCC */ 4409 FETCH(r0, 1) @ r0<- CCBB 4410 mov r9, rINST, lsr #8 @ r9<- AA 4411 mov r3, r0, lsr #8 @ r3<- CC 4412 and r2, r0, #255 @ r2<- BB 4413 GET_VREG(r1, r3) @ r1<- vCC 4414 GET_VREG(r0, r2) @ r0<- vBB 4415 .if 0 4416 cmp r1, #0 @ is second operand zero? 4417 beq common_errDivideByZero 4418 .endif 4419 4420 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4421 and r1, r1, #31 @ optional op; may set condition codes 4422 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 4423 GET_INST_OPCODE(ip) @ extract opcode from rINST 4424 SET_VREG(r0, r9) @ vAA<- r0 4425 GOTO_OPCODE(ip) @ jump to next instruction 4426 /* 11-14 instructions */ 4427 4428 4429 4430/* ------------------------------ */ 4431 .balign 64 4432.L_OP_SHR_INT: /* 0x99 */ 4433/* File: armv5te/OP_SHR_INT.S */ 4434/* File: armv5te/binop.S */ 4435 /* 4436 * Generic 32-bit binary operation. Provide an "instr" line that 4437 * specifies an instruction that performs "result = r0 op r1". 4438 * This could be an ARM instruction or a function call. (If the result 4439 * comes back in a register other than r0, you can override "result".) 4440 * 4441 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4442 * vCC (r1). Useful for integer division and modulus. Note that we 4443 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4444 * handles it correctly. 
4445 * 4446 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4447 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4448 * mul-float, div-float, rem-float 4449 */ 4450 /* binop vAA, vBB, vCC */ 4451 FETCH(r0, 1) @ r0<- CCBB 4452 mov r9, rINST, lsr #8 @ r9<- AA 4453 mov r3, r0, lsr #8 @ r3<- CC 4454 and r2, r0, #255 @ r2<- BB 4455 GET_VREG(r1, r3) @ r1<- vCC 4456 GET_VREG(r0, r2) @ r0<- vBB 4457 .if 0 4458 cmp r1, #0 @ is second operand zero? 4459 beq common_errDivideByZero 4460 .endif 4461 4462 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4463 and r1, r1, #31 @ optional op; may set condition codes 4464 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 4465 GET_INST_OPCODE(ip) @ extract opcode from rINST 4466 SET_VREG(r0, r9) @ vAA<- r0 4467 GOTO_OPCODE(ip) @ jump to next instruction 4468 /* 11-14 instructions */ 4469 4470 4471 4472/* ------------------------------ */ 4473 .balign 64 4474.L_OP_USHR_INT: /* 0x9a */ 4475/* File: armv5te/OP_USHR_INT.S */ 4476/* File: armv5te/binop.S */ 4477 /* 4478 * Generic 32-bit binary operation. Provide an "instr" line that 4479 * specifies an instruction that performs "result = r0 op r1". 4480 * This could be an ARM instruction or a function call. (If the result 4481 * comes back in a register other than r0, you can override "result".) 4482 * 4483 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4484 * vCC (r1). Useful for integer division and modulus. Note that we 4485 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4486 * handles it correctly. 
4487 * 4488 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4489 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4490 * mul-float, div-float, rem-float 4491 */ 4492 /* binop vAA, vBB, vCC */ 4493 FETCH(r0, 1) @ r0<- CCBB 4494 mov r9, rINST, lsr #8 @ r9<- AA 4495 mov r3, r0, lsr #8 @ r3<- CC 4496 and r2, r0, #255 @ r2<- BB 4497 GET_VREG(r1, r3) @ r1<- vCC 4498 GET_VREG(r0, r2) @ r0<- vBB 4499 .if 0 4500 cmp r1, #0 @ is second operand zero? 4501 beq common_errDivideByZero 4502 .endif 4503 4504 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4505 and r1, r1, #31 @ optional op; may set condition codes 4506 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 4507 GET_INST_OPCODE(ip) @ extract opcode from rINST 4508 SET_VREG(r0, r9) @ vAA<- r0 4509 GOTO_OPCODE(ip) @ jump to next instruction 4510 /* 11-14 instructions */ 4511 4512 4513 4514/* ------------------------------ */ 4515 .balign 64 4516.L_OP_ADD_LONG: /* 0x9b */ 4517/* File: armv5te/OP_ADD_LONG.S */ 4518/* File: armv5te/binopWide.S */ 4519 /* 4520 * Generic 64-bit binary operation. Provide an "instr" line that 4521 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4522 * This could be an ARM instruction or a function call. (If the result 4523 * comes back in a register other than r0, you can override "result".) 4524 * 4525 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4526 * vCC (r1). Useful for integer division and modulus. 4527 * 4528 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4529 * xor-long, add-double, sub-double, mul-double, div-double, 4530 * rem-double 4531 * 4532 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
 */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    adds    r0, r0, r2                  @ optional op; sets C for the adc below
    adc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG: /* 0x9c */
/* File: armv5te/OP_SUB_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ optional op; sets C (borrow) for the sbc below
    sbc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG: /* 0x9d */
/* File: armv5te/OP_MUL_LONG.S */
    /*
     * Signed 64-bit integer multiply.
     *
     * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
     *        WX
     *      x YZ
     *  --------
     *     ZW ZX
     *  YW YX
     *
     * The low word of the result holds ZX, the high word holds
     * (ZW+YX) + (the high overflow from ZX).  YW doesn't matter because
     * it doesn't fit in the low 64 bits.
     *
     * Unlike most ARM math operations, multiply instructions have
     * restrictions on using the same register more than once (Rd and Rm
     * cannot be the same).
     */
    /* mul-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1 (X=r0 lo, W=r1 hi)
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1 (Z=r2 lo, Y=r3 hi)
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX (full 64-bit unsigned product)
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST, lsr #8           @ r0<- AA
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX)) = high result word
    add     r0, rFP, r0, lsl #2         @ r0<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_MUL_LONG_finish        @ store r9/r10 to vAA/vAA+1 in out-of-line tail

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG: /* 0x9e */
/* File: armv5te/OP_DIV_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1 (quotient)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG: /* 0x9f */
/* File: armv5te/OP_REM_LONG.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vAA/vAA+1<- r2/r3 (remainder, not the r0/r1 quotient)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG: /* 0xa0 */
/* File: armv5te/OP_AND_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ optional op; may set condition codes
    and     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG: /* 0xa1 */
/* File: armv5te/OP_OR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ optional op; may set condition codes
    orr     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG: /* 0xa2 */
/* File: armv5te/OP_XOR_LONG.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG: /* 0xa3 */
/* File: armv5te/OP_SHL_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    @ 64-bit left shift composed from two 32-bit shifts; the movpl line
    @ handles distances >= 32 (low word then contributes nothing to r1).
    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG: /* 0xa4 */
/* File: armv5te/OP_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* shr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    @ Low word: logical shift right, then pull in bits vacated from the
    @ high word; the movpl line handles distances >= 32 (sign-propagating).
    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHR_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG: /* 0xa5 */
/* File: armv5te/OP_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* ushr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    @ Same as shr-long but zero-filling: lsr (not asr) for distances >= 32.
    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_USHR_LONG_finish

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT: /* 0xa6 */
/* File: arm-vfp/OP_ADD_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fadds   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT: /* 0xa7 */
/* File: arm-vfp/OP_SUB_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.
     * Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT: /* 0xa8 */
/* File: arm-vfp/OP_MUL_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuls   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT: /* 0xa9 */
/* File: arm-vfp/OP_DIV_FLOAT.S */
/* File: arm-vfp/fbinop.S */
    /*
     * Generic 32-bit floating-point operation.  Provide an "instr" line that
     * specifies an instruction that performs "s2 = s0 op s1".  Because we
     * use the "softfp" ABI, this must be an instruction, not a function call.
     *
     * For: add-float, sub-float, mul-float, div-float
     */
    /* floatop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    flds    s1, [r3]                    @ s1<- vCC
    flds    s0, [r2]                    @ s0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fsts    s2, [r9]                    @ vAA<- s2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT: /* 0xaa */
/* File: armv5te/OP_REM_FLOAT.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
     *
     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
     *      xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
     *      mul-float, div-float, rem-float
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    GET_VREG(r1, r3)                    @ r1<- vCC
    GET_VREG(r0, r2)                    @ r0<- vBB
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 11-14 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE: /* 0xab */
/* File: arm-vfp/OP_ADD_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    faddd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE: /* 0xac */
/* File: arm-vfp/OP_SUB_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fsubd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE: /* 0xad */
/* File: arm-vfp/OP_MUL_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fmuld   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE: /* 0xae */
/* File: arm-vfp/OP_DIV_DOUBLE.S */
/* File: arm-vfp/fbinopWide.S */
    /*
     * Generic 64-bit double-precision floating point binary operation.
     * Provide an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * for: add-double, sub-double, mul-double, div-double
     */
    /* doubleop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    mov     r3, r0, lsr #8              @ r3<- CC
    and     r2, r0, #255                @ r2<- BB
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vCC
    VREG_INDEX_TO_ADDR(r2, r2)          @ r2<- &vBB
    fldd    d1, [r3]                    @ d1<- vCC
    fldd    d0, [r2]                    @ d0<- vBB

    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    fdivd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vAA
    fstd    d2, [r9]                    @ vAA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE: /* 0xaf */
/* File: armv5te/OP_REM_DOUBLE.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_2ADDR: /* 0xb0 */
/* File: armv6t2/OP_ADD_INT_2ADDR.S */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT_2ADDR: /* 0xb1 */
/* File: armv6t2/OP_SUB_INT_2ADDR.S */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_2ADDR: /* 0xb2 */
/* File: armv6t2/OP_MUL_INT_2ADDR.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed (Rd != Rm constraint)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_2ADDR: /* 0xb3 */
/* File: armv6t2/OP_DIV_INT_2ADDR.S */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_2ADDR: /* 0xb4 */
/* File: armv6t2/OP_REM_INT_2ADDR.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op (remainder), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vA<- r1 (remainder, not the r0 quotient)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_2ADDR: /* 0xb5 */
/* File: armv6t2/OP_AND_INT_2ADDR.S */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
5508 beq common_errDivideByZero 5509 .endif 5510 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5511 5512 @ optional op; may set condition codes 5513 and r0, r0, r1 @ r0<- op, r0-r3 changed 5514 GET_INST_OPCODE(ip) @ extract opcode from rINST 5515 SET_VREG(r0, r9) @ vAA<- r0 5516 GOTO_OPCODE(ip) @ jump to next instruction 5517 /* 10-13 instructions */ 5518 5519 5520 5521/* ------------------------------ */ 5522 .balign 64 5523.L_OP_OR_INT_2ADDR: /* 0xb6 */ 5524/* File: armv6t2/OP_OR_INT_2ADDR.S */ 5525/* File: armv6t2/binop2addr.S */ 5526 /* 5527 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5528 * that specifies an instruction that performs "result = r0 op r1". 5529 * This could be an ARM instruction or a function call. (If the result 5530 * comes back in a register other than r0, you can override "result".) 5531 * 5532 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5533 * vCC (r1). Useful for integer division and modulus. 5534 * 5535 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5536 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5537 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5538 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5539 */ 5540 /* binop/2addr vA, vB */ 5541 mov r3, rINST, lsr #12 @ r3<- B 5542 ubfx r9, rINST, #8, #4 @ r9<- A 5543 GET_VREG(r1, r3) @ r1<- vB 5544 GET_VREG(r0, r9) @ r0<- vA 5545 .if 0 5546 cmp r1, #0 @ is second operand zero? 
5547 beq common_errDivideByZero 5548 .endif 5549 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5550 5551 @ optional op; may set condition codes 5552 orr r0, r0, r1 @ r0<- op, r0-r3 changed 5553 GET_INST_OPCODE(ip) @ extract opcode from rINST 5554 SET_VREG(r0, r9) @ vAA<- r0 5555 GOTO_OPCODE(ip) @ jump to next instruction 5556 /* 10-13 instructions */ 5557 5558 5559 5560/* ------------------------------ */ 5561 .balign 64 5562.L_OP_XOR_INT_2ADDR: /* 0xb7 */ 5563/* File: armv6t2/OP_XOR_INT_2ADDR.S */ 5564/* File: armv6t2/binop2addr.S */ 5565 /* 5566 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5567 * that specifies an instruction that performs "result = r0 op r1". 5568 * This could be an ARM instruction or a function call. (If the result 5569 * comes back in a register other than r0, you can override "result".) 5570 * 5571 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5572 * vCC (r1). Useful for integer division and modulus. 5573 * 5574 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5575 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5576 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5577 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5578 */ 5579 /* binop/2addr vA, vB */ 5580 mov r3, rINST, lsr #12 @ r3<- B 5581 ubfx r9, rINST, #8, #4 @ r9<- A 5582 GET_VREG(r1, r3) @ r1<- vB 5583 GET_VREG(r0, r9) @ r0<- vA 5584 .if 0 5585 cmp r1, #0 @ is second operand zero? 
5586 beq common_errDivideByZero 5587 .endif 5588 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5589 5590 @ optional op; may set condition codes 5591 eor r0, r0, r1 @ r0<- op, r0-r3 changed 5592 GET_INST_OPCODE(ip) @ extract opcode from rINST 5593 SET_VREG(r0, r9) @ vAA<- r0 5594 GOTO_OPCODE(ip) @ jump to next instruction 5595 /* 10-13 instructions */ 5596 5597 5598 5599/* ------------------------------ */ 5600 .balign 64 5601.L_OP_SHL_INT_2ADDR: /* 0xb8 */ 5602/* File: armv6t2/OP_SHL_INT_2ADDR.S */ 5603/* File: armv6t2/binop2addr.S */ 5604 /* 5605 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5606 * that specifies an instruction that performs "result = r0 op r1". 5607 * This could be an ARM instruction or a function call. (If the result 5608 * comes back in a register other than r0, you can override "result".) 5609 * 5610 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5611 * vCC (r1). Useful for integer division and modulus. 5612 * 5613 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5614 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5615 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5616 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5617 */ 5618 /* binop/2addr vA, vB */ 5619 mov r3, rINST, lsr #12 @ r3<- B 5620 ubfx r9, rINST, #8, #4 @ r9<- A 5621 GET_VREG(r1, r3) @ r1<- vB 5622 GET_VREG(r0, r9) @ r0<- vA 5623 .if 0 5624 cmp r1, #0 @ is second operand zero? 
5625 beq common_errDivideByZero 5626 .endif 5627 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5628 5629 and r1, r1, #31 @ optional op; may set condition codes 5630 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 5631 GET_INST_OPCODE(ip) @ extract opcode from rINST 5632 SET_VREG(r0, r9) @ vAA<- r0 5633 GOTO_OPCODE(ip) @ jump to next instruction 5634 /* 10-13 instructions */ 5635 5636 5637 5638/* ------------------------------ */ 5639 .balign 64 5640.L_OP_SHR_INT_2ADDR: /* 0xb9 */ 5641/* File: armv6t2/OP_SHR_INT_2ADDR.S */ 5642/* File: armv6t2/binop2addr.S */ 5643 /* 5644 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5645 * that specifies an instruction that performs "result = r0 op r1". 5646 * This could be an ARM instruction or a function call. (If the result 5647 * comes back in a register other than r0, you can override "result".) 5648 * 5649 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5650 * vCC (r1). Useful for integer division and modulus. 5651 * 5652 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5653 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5654 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5655 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5656 */ 5657 /* binop/2addr vA, vB */ 5658 mov r3, rINST, lsr #12 @ r3<- B 5659 ubfx r9, rINST, #8, #4 @ r9<- A 5660 GET_VREG(r1, r3) @ r1<- vB 5661 GET_VREG(r0, r9) @ r0<- vA 5662 .if 0 5663 cmp r1, #0 @ is second operand zero? 
5664 beq common_errDivideByZero 5665 .endif 5666 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5667 5668 and r1, r1, #31 @ optional op; may set condition codes 5669 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 5670 GET_INST_OPCODE(ip) @ extract opcode from rINST 5671 SET_VREG(r0, r9) @ vAA<- r0 5672 GOTO_OPCODE(ip) @ jump to next instruction 5673 /* 10-13 instructions */ 5674 5675 5676 5677/* ------------------------------ */ 5678 .balign 64 5679.L_OP_USHR_INT_2ADDR: /* 0xba */ 5680/* File: armv6t2/OP_USHR_INT_2ADDR.S */ 5681/* File: armv6t2/binop2addr.S */ 5682 /* 5683 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line 5684 * that specifies an instruction that performs "result = r0 op r1". 5685 * This could be an ARM instruction or a function call. (If the result 5686 * comes back in a register other than r0, you can override "result".) 5687 * 5688 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5689 * vCC (r1). Useful for integer division and modulus. 5690 * 5691 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, 5692 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, 5693 * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr, 5694 * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr 5695 */ 5696 /* binop/2addr vA, vB */ 5697 mov r3, rINST, lsr #12 @ r3<- B 5698 ubfx r9, rINST, #8, #4 @ r9<- A 5699 GET_VREG(r1, r3) @ r1<- vB 5700 GET_VREG(r0, r9) @ r0<- vA 5701 .if 0 5702 cmp r1, #0 @ is second operand zero? 
5703 beq common_errDivideByZero 5704 .endif 5705 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5706 5707 and r1, r1, #31 @ optional op; may set condition codes 5708 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 5709 GET_INST_OPCODE(ip) @ extract opcode from rINST 5710 SET_VREG(r0, r9) @ vAA<- r0 5711 GOTO_OPCODE(ip) @ jump to next instruction 5712 /* 10-13 instructions */ 5713 5714 5715 5716/* ------------------------------ */ 5717 .balign 64 5718.L_OP_ADD_LONG_2ADDR: /* 0xbb */ 5719/* File: armv6t2/OP_ADD_LONG_2ADDR.S */ 5720/* File: armv6t2/binopWide2addr.S */ 5721 /* 5722 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5723 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5724 * This could be an ARM instruction or a function call. (If the result 5725 * comes back in a register other than r0, you can override "result".) 5726 * 5727 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5728 * vCC (r1). Useful for integer division and modulus. 5729 * 5730 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5731 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5732 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5733 * rem-double/2addr 5734 */ 5735 /* binop/2addr vA, vB */ 5736 mov r1, rINST, lsr #12 @ r1<- B 5737 ubfx r9, rINST, #8, #4 @ r9<- A 5738 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5739 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5740 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5741 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5742 .if 0 5743 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5744 beq common_errDivideByZero 5745 .endif 5746 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5747 5748 adds r0, r0, r2 @ optional op; may set condition codes 5749 adc r1, r1, r3 @ result<- op, r0-r3 changed 5750 GET_INST_OPCODE(ip) @ extract opcode from rINST 5751 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5752 GOTO_OPCODE(ip) @ jump to next instruction 5753 /* 12-15 instructions */ 5754 5755 5756 5757/* ------------------------------ */ 5758 .balign 64 5759.L_OP_SUB_LONG_2ADDR: /* 0xbc */ 5760/* File: armv6t2/OP_SUB_LONG_2ADDR.S */ 5761/* File: armv6t2/binopWide2addr.S */ 5762 /* 5763 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5764 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5765 * This could be an ARM instruction or a function call. (If the result 5766 * comes back in a register other than r0, you can override "result".) 5767 * 5768 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5769 * vCC (r1). Useful for integer division and modulus. 5770 * 5771 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5772 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5773 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5774 * rem-double/2addr 5775 */ 5776 /* binop/2addr vA, vB */ 5777 mov r1, rINST, lsr #12 @ r1<- B 5778 ubfx r9, rINST, #8, #4 @ r9<- A 5779 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5780 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5781 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5782 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5783 .if 0 5784 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
5785 beq common_errDivideByZero 5786 .endif 5787 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5788 5789 subs r0, r0, r2 @ optional op; may set condition codes 5790 sbc r1, r1, r3 @ result<- op, r0-r3 changed 5791 GET_INST_OPCODE(ip) @ extract opcode from rINST 5792 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5793 GOTO_OPCODE(ip) @ jump to next instruction 5794 /* 12-15 instructions */ 5795 5796 5797 5798/* ------------------------------ */ 5799 .balign 64 5800.L_OP_MUL_LONG_2ADDR: /* 0xbd */ 5801/* File: armv6t2/OP_MUL_LONG_2ADDR.S */ 5802 /* 5803 * Signed 64-bit integer multiply, "/2addr" version. 5804 * 5805 * See OP_MUL_LONG for an explanation. 5806 * 5807 * We get a little tight on registers, so to avoid looking up &fp[A] 5808 * again we stuff it into rINST. 5809 */ 5810 /* mul-long/2addr vA, vB */ 5811 mov r1, rINST, lsr #12 @ r1<- B 5812 ubfx r9, rINST, #8, #4 @ r9<- A 5813 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5814 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] 5815 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5816 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 5817 mul ip, r2, r1 @ ip<- ZxW 5818 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 5819 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 5820 mov r0, rINST @ r0<- &fp[A] (free up rINST) 5821 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5822 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 5823 GET_INST_OPCODE(ip) @ extract opcode from rINST 5824 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 5825 GOTO_OPCODE(ip) @ jump to next instruction 5826 5827 5828/* ------------------------------ */ 5829 .balign 64 5830.L_OP_DIV_LONG_2ADDR: /* 0xbe */ 5831/* File: armv6t2/OP_DIV_LONG_2ADDR.S */ 5832/* File: armv6t2/binopWide2addr.S */ 5833 /* 5834 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5835 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5836 * This could be an ARM instruction or a function call. (If the result 5837 * comes back in a register other than r0, you can override "result".) 
5838 * 5839 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5840 * vCC (r1). Useful for integer division and modulus. 5841 * 5842 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5843 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5844 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5845 * rem-double/2addr 5846 */ 5847 /* binop/2addr vA, vB */ 5848 mov r1, rINST, lsr #12 @ r1<- B 5849 ubfx r9, rINST, #8, #4 @ r9<- A 5850 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5851 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5852 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5853 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5854 .if 1 5855 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5856 beq common_errDivideByZero 5857 .endif 5858 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5859 5860 @ optional op; may set condition codes 5861 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5862 GET_INST_OPCODE(ip) @ extract opcode from rINST 5863 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5864 GOTO_OPCODE(ip) @ jump to next instruction 5865 /* 12-15 instructions */ 5866 5867 5868 5869/* ------------------------------ */ 5870 .balign 64 5871.L_OP_REM_LONG_2ADDR: /* 0xbf */ 5872/* File: armv6t2/OP_REM_LONG_2ADDR.S */ 5873/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 5874/* File: armv6t2/binopWide2addr.S */ 5875 /* 5876 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5877 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5878 * This could be an ARM instruction or a function call. (If the result 5879 * comes back in a register other than r0, you can override "result".) 5880 * 5881 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5882 * vCC (r1). Useful for integer division and modulus. 
5883 * 5884 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5885 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5886 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5887 * rem-double/2addr 5888 */ 5889 /* binop/2addr vA, vB */ 5890 mov r1, rINST, lsr #12 @ r1<- B 5891 ubfx r9, rINST, #8, #4 @ r9<- A 5892 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5893 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5894 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5895 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5896 .if 1 5897 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5898 beq common_errDivideByZero 5899 .endif 5900 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5901 5902 @ optional op; may set condition codes 5903 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 5904 GET_INST_OPCODE(ip) @ extract opcode from rINST 5905 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 5906 GOTO_OPCODE(ip) @ jump to next instruction 5907 /* 12-15 instructions */ 5908 5909 5910 5911/* ------------------------------ */ 5912 .balign 64 5913.L_OP_AND_LONG_2ADDR: /* 0xc0 */ 5914/* File: armv6t2/OP_AND_LONG_2ADDR.S */ 5915/* File: armv6t2/binopWide2addr.S */ 5916 /* 5917 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5918 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5919 * This could be an ARM instruction or a function call. (If the result 5920 * comes back in a register other than r0, you can override "result".) 5921 * 5922 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5923 * vCC (r1). Useful for integer division and modulus. 
5924 * 5925 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5926 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5927 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5928 * rem-double/2addr 5929 */ 5930 /* binop/2addr vA, vB */ 5931 mov r1, rINST, lsr #12 @ r1<- B 5932 ubfx r9, rINST, #8, #4 @ r9<- A 5933 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5934 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5935 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5936 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5937 .if 0 5938 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5939 beq common_errDivideByZero 5940 .endif 5941 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5942 5943 and r0, r0, r2 @ optional op; may set condition codes 5944 and r1, r1, r3 @ result<- op, r0-r3 changed 5945 GET_INST_OPCODE(ip) @ extract opcode from rINST 5946 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5947 GOTO_OPCODE(ip) @ jump to next instruction 5948 /* 12-15 instructions */ 5949 5950 5951 5952/* ------------------------------ */ 5953 .balign 64 5954.L_OP_OR_LONG_2ADDR: /* 0xc1 */ 5955/* File: armv6t2/OP_OR_LONG_2ADDR.S */ 5956/* File: armv6t2/binopWide2addr.S */ 5957 /* 5958 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 5959 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 5960 * This could be an ARM instruction or a function call. (If the result 5961 * comes back in a register other than r0, you can override "result".) 5962 * 5963 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5964 * vCC (r1). Useful for integer division and modulus. 
5965 * 5966 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 5967 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 5968 * sub-double/2addr, mul-double/2addr, div-double/2addr, 5969 * rem-double/2addr 5970 */ 5971 /* binop/2addr vA, vB */ 5972 mov r1, rINST, lsr #12 @ r1<- B 5973 ubfx r9, rINST, #8, #4 @ r9<- A 5974 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 5975 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 5976 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 5977 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 5978 .if 0 5979 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 5980 beq common_errDivideByZero 5981 .endif 5982 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 5983 5984 orr r0, r0, r2 @ optional op; may set condition codes 5985 orr r1, r1, r3 @ result<- op, r0-r3 changed 5986 GET_INST_OPCODE(ip) @ extract opcode from rINST 5987 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 5988 GOTO_OPCODE(ip) @ jump to next instruction 5989 /* 12-15 instructions */ 5990 5991 5992 5993/* ------------------------------ */ 5994 .balign 64 5995.L_OP_XOR_LONG_2ADDR: /* 0xc2 */ 5996/* File: armv6t2/OP_XOR_LONG_2ADDR.S */ 5997/* File: armv6t2/binopWide2addr.S */ 5998 /* 5999 * Generic 64-bit "/2addr" binary operation. Provide an "instr" line 6000 * that specifies an instruction that performs "result = r0-r1 op r2-r3". 6001 * This could be an ARM instruction or a function call. (If the result 6002 * comes back in a register other than r0, you can override "result".) 6003 * 6004 * If "chkzero" is set to 1, we perform a divide-by-zero check on 6005 * vCC (r1). Useful for integer division and modulus. 
6006 * 6007 * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr, 6008 * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr, 6009 * sub-double/2addr, mul-double/2addr, div-double/2addr, 6010 * rem-double/2addr 6011 */ 6012 /* binop/2addr vA, vB */ 6013 mov r1, rINST, lsr #12 @ r1<- B 6014 ubfx r9, rINST, #8, #4 @ r9<- A 6015 add r1, rFP, r1, lsl #2 @ r1<- &fp[B] 6016 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6017 ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 6018 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6019 .if 0 6020 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 6021 beq common_errDivideByZero 6022 .endif 6023 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6024 6025 eor r0, r0, r2 @ optional op; may set condition codes 6026 eor r1, r1, r3 @ result<- op, r0-r3 changed 6027 GET_INST_OPCODE(ip) @ extract opcode from rINST 6028 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 6029 GOTO_OPCODE(ip) @ jump to next instruction 6030 /* 12-15 instructions */ 6031 6032 6033 6034/* ------------------------------ */ 6035 .balign 64 6036.L_OP_SHL_LONG_2ADDR: /* 0xc3 */ 6037/* File: armv6t2/OP_SHL_LONG_2ADDR.S */ 6038 /* 6039 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6040 * 32-bit shift distance. 
6041 */ 6042 /* shl-long/2addr vA, vB */ 6043 mov r3, rINST, lsr #12 @ r3<- B 6044 ubfx r9, rINST, #8, #4 @ r9<- A 6045 GET_VREG(r2, r3) @ r2<- vB 6046 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6047 and r2, r2, #63 @ r2<- r2 & 0x3f 6048 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6049 6050 mov r1, r1, asl r2 @ r1<- r1 << r2 6051 rsb r3, r2, #32 @ r3<- 32 - r2 6052 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) 6053 subs ip, r2, #32 @ ip<- r2 - 32 6054 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6055 movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32) 6056 mov r0, r0, asl r2 @ r0<- r0 << r2 6057 b .LOP_SHL_LONG_2ADDR_finish 6058 6059/* ------------------------------ */ 6060 .balign 64 6061.L_OP_SHR_LONG_2ADDR: /* 0xc4 */ 6062/* File: armv6t2/OP_SHR_LONG_2ADDR.S */ 6063 /* 6064 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6065 * 32-bit shift distance. 6066 */ 6067 /* shr-long/2addr vA, vB */ 6068 mov r3, rINST, lsr #12 @ r3<- B 6069 ubfx r9, rINST, #8, #4 @ r9<- A 6070 GET_VREG(r2, r3) @ r2<- vB 6071 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6072 and r2, r2, #63 @ r2<- r2 & 0x3f 6073 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6074 6075 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6076 rsb r3, r2, #32 @ r3<- 32 - r2 6077 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6078 subs ip, r2, #32 @ ip<- r2 - 32 6079 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6080 movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32) 6081 mov r1, r1, asr r2 @ r1<- r1 >> r2 6082 b .LOP_SHR_LONG_2ADDR_finish 6083 6084/* ------------------------------ */ 6085 .balign 64 6086.L_OP_USHR_LONG_2ADDR: /* 0xc5 */ 6087/* File: armv6t2/OP_USHR_LONG_2ADDR.S */ 6088 /* 6089 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is 6090 * 32-bit shift distance. 
6091 */ 6092 /* ushr-long/2addr vA, vB */ 6093 mov r3, rINST, lsr #12 @ r3<- B 6094 ubfx r9, rINST, #8, #4 @ r9<- A 6095 GET_VREG(r2, r3) @ r2<- vB 6096 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 6097 and r2, r2, #63 @ r2<- r2 & 0x3f 6098 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 6099 6100 mov r0, r0, lsr r2 @ r0<- r2 >> r2 6101 rsb r3, r2, #32 @ r3<- 32 - r2 6102 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) 6103 subs ip, r2, #32 @ ip<- r2 - 32 6104 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6105 movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32) 6106 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 6107 b .LOP_USHR_LONG_2ADDR_finish 6108 6109/* ------------------------------ */ 6110 .balign 64 6111.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */ 6112/* File: arm-vfp/OP_ADD_FLOAT_2ADDR.S */ 6113/* File: arm-vfp/fbinop2addr.S */ 6114 /* 6115 * Generic 32-bit floating point "/2addr" binary operation. Provide 6116 * an "instr" line that specifies an instruction that performs 6117 * "s2 = s0 op s1". 6118 * 6119 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6120 */ 6121 /* binop/2addr vA, vB */ 6122 mov r3, rINST, lsr #12 @ r3<- B 6123 mov r9, rINST, lsr #8 @ r9<- A+ 6124 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6125 and r9, r9, #15 @ r9<- A 6126 flds s1, [r3] @ s1<- vB 6127 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6128 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6129 flds s0, [r9] @ s0<- vA 6130 6131 fadds s2, s0, s1 @ s2<- op 6132 GET_INST_OPCODE(ip) @ extract opcode from rINST 6133 fsts s2, [r9] @ vAA<- s2 6134 GOTO_OPCODE(ip) @ jump to next instruction 6135 6136 6137/* ------------------------------ */ 6138 .balign 64 6139.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */ 6140/* File: arm-vfp/OP_SUB_FLOAT_2ADDR.S */ 6141/* File: arm-vfp/fbinop2addr.S */ 6142 /* 6143 * Generic 32-bit floating point "/2addr" binary operation. Provide 6144 * an "instr" line that specifies an instruction that performs 6145 * "s2 = s0 op s1". 
6146 * 6147 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6148 */ 6149 /* binop/2addr vA, vB */ 6150 mov r3, rINST, lsr #12 @ r3<- B 6151 mov r9, rINST, lsr #8 @ r9<- A+ 6152 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6153 and r9, r9, #15 @ r9<- A 6154 flds s1, [r3] @ s1<- vB 6155 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6156 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6157 flds s0, [r9] @ s0<- vA 6158 6159 fsubs s2, s0, s1 @ s2<- op 6160 GET_INST_OPCODE(ip) @ extract opcode from rINST 6161 fsts s2, [r9] @ vAA<- s2 6162 GOTO_OPCODE(ip) @ jump to next instruction 6163 6164 6165/* ------------------------------ */ 6166 .balign 64 6167.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */ 6168/* File: arm-vfp/OP_MUL_FLOAT_2ADDR.S */ 6169/* File: arm-vfp/fbinop2addr.S */ 6170 /* 6171 * Generic 32-bit floating point "/2addr" binary operation. Provide 6172 * an "instr" line that specifies an instruction that performs 6173 * "s2 = s0 op s1". 6174 * 6175 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr 6176 */ 6177 /* binop/2addr vA, vB */ 6178 mov r3, rINST, lsr #12 @ r3<- B 6179 mov r9, rINST, lsr #8 @ r9<- A+ 6180 VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB 6181 and r9, r9, #15 @ r9<- A 6182 flds s1, [r3] @ s1<- vB 6183 VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA 6184 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 6185 flds s0, [r9] @ s0<- vA 6186 6187 fmuls s2, s0, s1 @ s2<- op 6188 GET_INST_OPCODE(ip) @ extract opcode from rINST 6189 fsts s2, [r9] @ vAA<- s2 6190 GOTO_OPCODE(ip) @ jump to next instruction 6191 6192 6193/* ------------------------------ */ 6194 .balign 64 6195.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */ 6196/* File: arm-vfp/OP_DIV_FLOAT_2ADDR.S */ 6197/* File: arm-vfp/fbinop2addr.S */ 6198 /* 6199 * Generic 32-bit floating point "/2addr" binary operation. Provide 6200 * an "instr" line that specifies an instruction that performs 6201 * "s2 = s0 op s1". 
 *
 * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
 */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    flds    s1, [r3]                    @ s1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    flds    s0, [r9]                    @ s0<- vA

    fdivs   s2, s0, s1                  @ s2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fsts    s2, [r9]                    @ vA<- s2 (result overwrites first operand)
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * NOTE(review): this file is generated by gen-mterp.py (see file header);
 * fix the per-opcode template files named in the "File:" comments rather
 * than editing handlers in place here.  Handlers are 64-byte aligned
 * (.balign 64) for rIBASE-relative computed-goto dispatch; presumably each
 * handler must fit within its 64-byte slot -- confirm against gen-mterp.py.
 */

/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT_2ADDR: /* 0xca */
/* File: armv6t2/OP_REM_FLOAT_2ADDR.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv6t2/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero=0 for float rem: no zero test
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
/* File: arm-vfp/OP_ADD_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    faddd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
/* File: arm-vfp/OP_SUB_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fsubd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
/* File: arm-vfp/OP_MUL_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fmuld   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
/* File: arm-vfp/OP_DIV_DOUBLE_2ADDR.S */
/* File: arm-vfp/fbinopWide2addr.S */
    /*
     * Generic 64-bit floating point "/2addr" binary operation.  Provide
     * an "instr" line that specifies an instruction that performs
     * "d2 = d0 op d1".
     *
     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
     *      div-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    VREG_INDEX_TO_ADDR(r3, r3)          @ r3<- &vB
    and     r9, r9, #15                 @ r9<- A
    fldd    d1, [r3]                    @ d1<- vB
    VREG_INDEX_TO_ADDR(r9, r9)          @ r9<- &vA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    fldd    d0, [r9]                    @ d0<- vA

    fdivd   d2, d0, d1                  @ d2<- op
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    fstd    d2, [r9]                    @ vA<- d2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
/* File: armv6t2/OP_REM_DOUBLE_2ADDR.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv6t2/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vB/vB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vA/vA+1
    .if 0                               @ chkzero=0 for double rem: no zero test
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vA/vA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT16: /* 0xd0 */
/* File: armv6t2/OP_ADD_INT_LIT16.S */
/* File: armv6t2/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r2)                    @ r0<- vB
    .if 0                               @ chkzero=0 for add: no zero test
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT: /* 0xd1 */
/* File: armv6t2/OP_RSUB_INT.S */
/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
/* File: armv6t2/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r2)                    @ r0<- vB
    .if 0                               @ chkzero=0 for rsub: no zero test
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    rsb     r0, r0, r1                  @ r0<- op (literal - vB), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT16: /* 0xd2 */
/* File: armv6t2/OP_MUL_INT_LIT16.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv6t2/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r2)                    @ r0<- vB
    .if 0                               @ chkzero=0 for mul: no zero test
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    mul     r0, r1, r0                  @ r0<- op (operand order per note above)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT16: /* 0xd3 */
/* File: armv6t2/OP_DIV_INT_LIT16.S */
/* File: armv6t2/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r2)                    @ r0<- vB
    .if 1                               @ chkzero=1: div must trap on zero literal
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT16: /* 0xd4 */
/* File: armv6t2/OP_REM_INT_LIT16.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv6t2/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r2)                    @ r0<- vB
    .if 1                               @ chkzero=1: rem must trap on zero literal
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idivmod             @ r1<- op (remainder), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT16: /* 0xd5 */
/* File: armv6t2/OP_AND_INT_LIT16.S */
/* File: armv6t2/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r2)                    @ r0<- vB
    .if 0                               @ chkzero=0 for and: no zero test
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT16: /* 0xd6 */
/* File: armv6t2/OP_OR_INT_LIT16.S */
/* File: armv6t2/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r2)                    @ r0<- vB
    .if 0                               @ chkzero=0 for or: no zero test
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT16: /* 0xd7 */
/* File: armv6t2/OP_XOR_INT_LIT16.S */
/* File: armv6t2/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    ubfx    r9, rINST, #8, #4           @ r9<- A
    GET_VREG(r0, r2)                    @ r0<- vB
    .if 0                               @ chkzero=0 for xor: no zero test
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT8: /* 0xd8 */
/* File: armv5te/OP_ADD_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ chkzero=0 for add: no zero test
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT_LIT8: /* 0xd9 */
/* File: armv5te/OP_RSUB_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ chkzero=0 for rsub: no zero test
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    rsb     r0, r0, r1                  @ r0<- op (literal - vBB), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT8: /* 0xda */
/* File: armv5te/OP_MUL_INT_LIT8.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ chkzero=0 for mul: no zero test
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op (operand order per note above)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT8: /* 0xdb */
/* File: armv5te/OP_DIV_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 1                               @ chkzero=1: div must trap on zero literal
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero      @ Z flag comes from "movs" above (cmp elided)
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT8: /* 0xdc */
/* File: armv5te/OP_REM_INT_LIT8.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 1                               @ chkzero=1: rem must trap on zero literal
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero      @ Z flag comes from "movs" above (cmp elided)
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op (remainder), r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT8: /* 0xdd */
/* File: armv5te/OP_AND_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ chkzero=0 for and: no zero test
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT8: /* 0xde */
/* File: armv5te/OP_OR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ chkzero=0 for or: no zero test
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT8: /* 0xdf */
/* File: armv5te/OP_XOR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ chkzero=0 for xor: no zero test
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                               @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT_LIT8: /* 0xe0 */
/* File: armv5te/OP_SHL_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ chkzero=0 for shl: no zero test
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op: mask shift count to 0-31
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_LIT8: /* 0xe1 */
/* File: armv5te/OP_SHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ chkzero=0 for shr: no zero test
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op: mask shift count to 0-31
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_LIT8: /* 0xe2 */
/* File: armv5te/OP_USHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended); sets Z
    .if 0                               @ chkzero=0 for ushr: no zero test
    @cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op: mask shift count to 0-31
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E3: /* 0xe3 */
/* File: armv5te/OP_UNUSED_E3.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E4: /* 0xe4 */
/* File: armv5te/OP_UNUSED_E4.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E5: /* 0xe5 */
/* File: armv5te/OP_UNUSED_E5.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E6: /* 0xe6 */
/* File: armv5te/OP_UNUSED_E6.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E7: /* 0xe7 */
/* File: armv5te/OP_UNUSED_E7.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E8: /* 0xe8 */
/* File: armv5te/OP_UNUSED_E8.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_E9: /* 0xe9 */
/* File: armv5te/OP_UNUSED_E9.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_EA: /* 0xea */
/* File: armv5te/OP_UNUSED_EA.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_EB: /* 0xeb */
/* File: armv5te/OP_UNUSED_EB.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unassigned opcode: abort


/* ------------------------------ */
    .balign 64
.L_OP_BREAKPOINT: /* 0xec */
/* File: armv5te/OP_BREAKPOINT.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ breakpoint not wired up here (uses unused.S)


/* ------------------------------ */
    .balign 64
.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */
    /*
     * Handle a throw-verification-error instruction.  This throws an
     * exception for an error discovered during verification.  The
     * exception is indicated by AA, with some detail provided by BBBB.
     */
    /* op AA, ref@BBBB */
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    FETCH(r2, 1)                        @ r2<- BBBB
    EXPORT_PC()                         @ export the PC (must precede throw)
    mov     r1, rINST, lsr #8           @ r1<- AA
    bl      dvmThrowVerificationError   @ always throws
    b       common_exceptionThrown      @ handle exception


/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE: /* 0xee */
/* File: armv5te/OP_EXECUTE_INLINE.S */
    /*
     * Execute a "native inline" instruction.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
     */
    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
    FETCH(r10, 1)                       @ r10<- BBBB
    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
    EXPORT_PC()                         @ can throw
    sub     sp, sp, #8                  @ make room for arg, +64 bit align
    mov     r0, rINST, lsr #12          @ r0<- B
    str     r1, [sp]                    @ push &glue->retval
    bl      .LOP_EXECUTE_INLINE_continue @ make call; will return after
    add     sp, sp, #8                  @ pop stack
    cmp     r0, #0                      @ test boolean result of inline
    beq     common_exceptionThrown      @ returned false, handle exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */
    /*
     * Execute a "native inline" instruction, using "/range" semantics.
     * Same idea as execute-inline, but we get the args differently.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
7292 */ 7293 /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */ 7294 FETCH(r10, 1) @ r10<- BBBB 7295 add r1, rGLUE, #offGlue_retval @ r1<- &glue->retval 7296 EXPORT_PC() @ can throw 7297 sub sp, sp, #8 @ make room for arg, +64 bit align 7298 mov r0, rINST, lsr #8 @ r0<- AA 7299 str r1, [sp] @ push &glue->retval 7300 bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after 7301 add sp, sp, #8 @ pop stack 7302 cmp r0, #0 @ test boolean result of inline 7303 beq common_exceptionThrown @ returned false, handle exception 7304 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7305 GET_INST_OPCODE(ip) @ extract opcode from rINST 7306 GOTO_OPCODE(ip) @ jump to next instruction 7307 7308/* ------------------------------ */ 7309 .balign 64 7310.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */ 7311/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */ 7312 /* 7313 * invoke-direct-empty is a no-op in a "standard" interpreter. 7314 */ 7315 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7316 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7317 GOTO_OPCODE(ip) @ execute it 7318 7319/* ------------------------------ */ 7320 .balign 64 7321.L_OP_UNUSED_F1: /* 0xf1 */ 7322/* File: armv5te/OP_UNUSED_F1.S */ 7323/* File: armv5te/unused.S */ 7324 bl common_abort 7325 7326 7327 7328/* ------------------------------ */ 7329 .balign 64 7330.L_OP_IGET_QUICK: /* 0xf2 */ 7331/* File: armv6t2/OP_IGET_QUICK.S */ 7332 /* For: iget-quick, iget-object-quick */ 7333 /* op vA, vB, offset@CCCC */ 7334 mov r2, rINST, lsr #12 @ r2<- B 7335 FETCH(r1, 1) @ r1<- field byte offset 7336 GET_VREG(r3, r2) @ r3<- object we're operating on 7337 ubfx r2, rINST, #8, #4 @ r2<- A 7338 cmp r3, #0 @ check object for null 7339 beq common_errNullObject @ object was null 7340 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7341 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7342 GET_INST_OPCODE(ip) @ extract opcode from rINST 7343 SET_VREG(r0, r2) @ fp[A]<- r0 7344 GOTO_OPCODE(ip) @ jump to next 
instruction 7345 7346 7347/* ------------------------------ */ 7348 .balign 64 7349.L_OP_IGET_WIDE_QUICK: /* 0xf3 */ 7350/* File: armv6t2/OP_IGET_WIDE_QUICK.S */ 7351 /* iget-wide-quick vA, vB, offset@CCCC */ 7352 mov r2, rINST, lsr #12 @ r2<- B 7353 FETCH(r1, 1) @ r1<- field byte offset 7354 GET_VREG(r3, r2) @ r3<- object we're operating on 7355 ubfx r2, rINST, #8, #4 @ r2<- A 7356 cmp r3, #0 @ check object for null 7357 beq common_errNullObject @ object was null 7358 ldrd r0, [r3, r1] @ r0<- obj.field (64 bits, aligned) 7359 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7360 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 7361 GET_INST_OPCODE(ip) @ extract opcode from rINST 7362 stmia r3, {r0-r1} @ fp[A]<- r0/r1 7363 GOTO_OPCODE(ip) @ jump to next instruction 7364 7365 7366/* ------------------------------ */ 7367 .balign 64 7368.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */ 7369/* File: armv5te/OP_IGET_OBJECT_QUICK.S */ 7370/* File: armv5te/OP_IGET_QUICK.S */ 7371 /* For: iget-quick, iget-object-quick */ 7372 /* op vA, vB, offset@CCCC */ 7373 mov r2, rINST, lsr #12 @ r2<- B 7374 GET_VREG(r3, r2) @ r3<- object we're operating on 7375 FETCH(r1, 1) @ r1<- field byte offset 7376 cmp r3, #0 @ check object for null 7377 mov r2, rINST, lsr #8 @ r2<- A(+) 7378 beq common_errNullObject @ object was null 7379 ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits) 7380 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7381 and r2, r2, #15 7382 GET_INST_OPCODE(ip) @ extract opcode from rINST 7383 SET_VREG(r0, r2) @ fp[A]<- r0 7384 GOTO_OPCODE(ip) @ jump to next instruction 7385 7386 7387 7388/* ------------------------------ */ 7389 .balign 64 7390.L_OP_IPUT_QUICK: /* 0xf5 */ 7391/* File: armv6t2/OP_IPUT_QUICK.S */ 7392 /* For: iput-quick, iput-object-quick */ 7393 /* op vA, vB, offset@CCCC */ 7394 mov r2, rINST, lsr #12 @ r2<- B 7395 FETCH(r1, 1) @ r1<- field byte offset 7396 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7397 ubfx r2, rINST, #8, #4 @ r2<- A 7398 cmp r3, #0 @ check object for 
null 7399 beq common_errNullObject @ object was null 7400 GET_VREG(r0, r2) @ r0<- fp[A] 7401 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7402 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7403 GET_INST_OPCODE(ip) @ extract opcode from rINST 7404 GOTO_OPCODE(ip) @ jump to next instruction 7405 7406 7407/* ------------------------------ */ 7408 .balign 64 7409.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7410/* File: armv6t2/OP_IPUT_WIDE_QUICK.S */ 7411 /* iput-wide-quick vA, vB, offset@CCCC */ 7412 mov r1, rINST, lsr #12 @ r1<- B 7413 ubfx r0, rINST, #8, #4 @ r0<- A 7414 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7415 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7416 cmp r2, #0 @ check object for null 7417 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7418 beq common_errNullObject @ object was null 7419 FETCH(r3, 1) @ r3<- field byte offset 7420 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7421 strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1 7422 GET_INST_OPCODE(ip) @ extract opcode from rINST 7423 GOTO_OPCODE(ip) @ jump to next instruction 7424 7425 7426/* ------------------------------ */ 7427 .balign 64 7428.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7429/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7430/* File: armv5te/OP_IPUT_QUICK.S */ 7431 /* For: iput-quick, iput-object-quick */ 7432 /* op vA, vB, offset@CCCC */ 7433 mov r2, rINST, lsr #12 @ r2<- B 7434 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7435 FETCH(r1, 1) @ r1<- field byte offset 7436 cmp r3, #0 @ check object for null 7437 mov r2, rINST, lsr #8 @ r2<- A(+) 7438 beq common_errNullObject @ object was null 7439 and r2, r2, #15 7440 GET_VREG(r0, r2) @ r0<- fp[A] 7441 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7442 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7443 GET_INST_OPCODE(ip) @ extract opcode from rINST 7444 GOTO_OPCODE(ip) @ jump to next instruction 7445 7446 7447 7448/* ------------------------------ */ 7449 .balign 64 7450.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */ 7451/* File: 
armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7452 /* 7453 * Handle an optimized virtual method call. 7454 * 7455 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7456 */ 7457 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7458 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7459 FETCH(r3, 2) @ r3<- FEDC or CCCC 7460 FETCH(r1, 1) @ r1<- BBBB 7461 .if (!0) 7462 and r3, r3, #15 @ r3<- C (or stays CCCC) 7463 .endif 7464 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7465 cmp r2, #0 @ is "this" null? 7466 beq common_errNullObject @ null "this", throw exception 7467 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7468 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7469 EXPORT_PC() @ invoke must export 7470 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7471 bl common_invokeMethodNoRange @ continue on 7472 7473/* ------------------------------ */ 7474 .balign 64 7475.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */ 7476/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */ 7477/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */ 7478 /* 7479 * Handle an optimized virtual method call. 7480 * 7481 * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range 7482 */ 7483 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7484 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7485 FETCH(r3, 2) @ r3<- FEDC or CCCC 7486 FETCH(r1, 1) @ r1<- BBBB 7487 .if (!1) 7488 and r3, r3, #15 @ r3<- C (or stays CCCC) 7489 .endif 7490 GET_VREG(r2, r3) @ r2<- vC ("this" ptr) 7491 cmp r2, #0 @ is "this" null? 
7492 beq common_errNullObject @ null "this", throw exception 7493 ldr r2, [r2, #offObject_clazz] @ r2<- thisPtr->clazz 7494 ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable 7495 EXPORT_PC() @ invoke must export 7496 ldr r0, [r2, r1, lsl #2] @ r3<- vtable[BBBB] 7497 bl common_invokeMethodRange @ continue on 7498 7499 7500/* ------------------------------ */ 7501 .balign 64 7502.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */ 7503/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7504 /* 7505 * Handle an optimized "super" method call. 7506 * 7507 * for: [opt] invoke-super-quick, invoke-super-quick/range 7508 */ 7509 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7510 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7511 FETCH(r10, 2) @ r10<- GFED or CCCC 7512 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7513 .if (!0) 7514 and r10, r10, #15 @ r10<- D (or stays CCCC) 7515 .endif 7516 FETCH(r1, 1) @ r1<- BBBB 7517 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7518 EXPORT_PC() @ must export for invoke 7519 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7520 GET_VREG(r3, r10) @ r3<- "this" 7521 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7522 cmp r3, #0 @ null "this" ref? 7523 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7524 beq common_errNullObject @ "this" is null, throw exception 7525 bl common_invokeMethodNoRange @ continue on 7526 7527 7528/* ------------------------------ */ 7529 .balign 64 7530.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */ 7531/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */ 7532/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */ 7533 /* 7534 * Handle an optimized "super" method call. 
7535 * 7536 * for: [opt] invoke-super-quick, invoke-super-quick/range 7537 */ 7538 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7539 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7540 FETCH(r10, 2) @ r10<- GFED or CCCC 7541 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7542 .if (!1) 7543 and r10, r10, #15 @ r10<- D (or stays CCCC) 7544 .endif 7545 FETCH(r1, 1) @ r1<- BBBB 7546 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 7547 EXPORT_PC() @ must export for invoke 7548 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 7549 GET_VREG(r3, r10) @ r3<- "this" 7550 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 7551 cmp r3, #0 @ null "this" ref? 7552 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 7553 beq common_errNullObject @ "this" is null, throw exception 7554 bl common_invokeMethodRange @ continue on 7555 7556 7557 7558/* ------------------------------ */ 7559 .balign 64 7560.L_OP_UNUSED_FC: /* 0xfc */ 7561/* File: armv5te/OP_UNUSED_FC.S */ 7562/* File: armv5te/unused.S */ 7563 bl common_abort 7564 7565 7566 7567/* ------------------------------ */ 7568 .balign 64 7569.L_OP_UNUSED_FD: /* 0xfd */ 7570/* File: armv5te/OP_UNUSED_FD.S */ 7571/* File: armv5te/unused.S */ 7572 bl common_abort 7573 7574 7575 7576/* ------------------------------ */ 7577 .balign 64 7578.L_OP_UNUSED_FE: /* 0xfe */ 7579/* File: armv5te/OP_UNUSED_FE.S */ 7580/* File: armv5te/unused.S */ 7581 bl common_abort 7582 7583 7584 7585/* ------------------------------ */ 7586 .balign 64 7587.L_OP_UNUSED_FF: /* 0xff */ 7588/* File: armv5te/OP_UNUSED_FF.S */ 7589/* File: armv5te/unused.S */ 7590 bl common_abort 7591 7592 7593 7594 7595 .balign 64 7596 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 7597 .global dvmAsmInstructionEnd 7598dvmAsmInstructionEnd: 7599 7600/* 7601 * =========================================================================== 7602 * Sister implementations 7603 * 
=========================================================================== 7604 */ 7605 .global dvmAsmSisterStart 7606 .type dvmAsmSisterStart, %function 7607 .text 7608 .balign 4 7609dvmAsmSisterStart: 7610 7611/* continuation for OP_CONST_STRING */ 7612 7613 /* 7614 * Continuation if the String has not yet been resolved. 7615 * r1: BBBB (String ref) 7616 * r9: target register 7617 */ 7618.LOP_CONST_STRING_resolve: 7619 EXPORT_PC() 7620 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7621 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7622 bl dvmResolveString @ r0<- String reference 7623 cmp r0, #0 @ failed? 7624 beq common_exceptionThrown @ yup, handle the exception 7625 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7626 GET_INST_OPCODE(ip) @ extract opcode from rINST 7627 SET_VREG(r0, r9) @ vAA<- r0 7628 GOTO_OPCODE(ip) @ jump to next instruction 7629 7630 7631/* continuation for OP_CONST_STRING_JUMBO */ 7632 7633 /* 7634 * Continuation if the String has not yet been resolved. 7635 * r1: BBBBBBBB (String ref) 7636 * r9: target register 7637 */ 7638.LOP_CONST_STRING_JUMBO_resolve: 7639 EXPORT_PC() 7640 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7641 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7642 bl dvmResolveString @ r0<- String reference 7643 cmp r0, #0 @ failed? 7644 beq common_exceptionThrown @ yup, handle the exception 7645 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 7646 GET_INST_OPCODE(ip) @ extract opcode from rINST 7647 SET_VREG(r0, r9) @ vAA<- r0 7648 GOTO_OPCODE(ip) @ jump to next instruction 7649 7650 7651/* continuation for OP_CONST_CLASS */ 7652 7653 /* 7654 * Continuation if the Class has not yet been resolved. 
7655 * r1: BBBB (Class ref) 7656 * r9: target register 7657 */ 7658.LOP_CONST_CLASS_resolve: 7659 EXPORT_PC() 7660 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7661 mov r2, #1 @ r2<- true 7662 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7663 bl dvmResolveClass @ r0<- Class reference 7664 cmp r0, #0 @ failed? 7665 beq common_exceptionThrown @ yup, handle the exception 7666 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7667 GET_INST_OPCODE(ip) @ extract opcode from rINST 7668 SET_VREG(r0, r9) @ vAA<- r0 7669 GOTO_OPCODE(ip) @ jump to next instruction 7670 7671 7672/* continuation for OP_CHECK_CAST */ 7673 7674 /* 7675 * Trivial test failed, need to perform full check. This is common. 7676 * r0 holds obj->clazz 7677 * r1 holds class resolved from BBBB 7678 * r9 holds object 7679 */ 7680.LOP_CHECK_CAST_fullcheck: 7681 bl dvmInstanceofNonTrivial @ r0<- boolean result 7682 cmp r0, #0 @ failed? 7683 bne .LOP_CHECK_CAST_okay @ no, success 7684 7685 @ A cast has failed. We need to throw a ClassCastException with the 7686 @ class of the object that failed to be cast. 7687 EXPORT_PC() @ about to throw 7688 ldr r3, [r9, #offObject_clazz] @ r3<- obj->clazz 7689 ldr r0, .LstrClassCastExceptionPtr 7690 ldr r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor 7691 bl dvmThrowExceptionWithClassMessage 7692 b common_exceptionThrown 7693 7694 /* 7695 * Resolution required. This is the least-likely path. 7696 * 7697 * r2 holds BBBB 7698 * r9 holds object 7699 */ 7700.LOP_CHECK_CAST_resolve: 7701 EXPORT_PC() @ resolve() could throw 7702 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7703 mov r1, r2 @ r1<- BBBB 7704 mov r2, #0 @ r2<- false 7705 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7706 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7707 cmp r0, #0 @ got null? 
7708 beq common_exceptionThrown @ yes, handle exception 7709 mov r1, r0 @ r1<- class resolved from BBB 7710 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 7711 b .LOP_CHECK_CAST_resolved @ pick up where we left off 7712 7713.LstrClassCastExceptionPtr: 7714 .word .LstrClassCastException 7715 7716 7717/* continuation for OP_INSTANCE_OF */ 7718 7719 /* 7720 * Trivial test failed, need to perform full check. This is common. 7721 * r0 holds obj->clazz 7722 * r1 holds class resolved from BBBB 7723 * r9 holds A 7724 */ 7725.LOP_INSTANCE_OF_fullcheck: 7726 bl dvmInstanceofNonTrivial @ r0<- boolean result 7727 @ fall through to OP_INSTANCE_OF_store 7728 7729 /* 7730 * r0 holds boolean result 7731 * r9 holds A 7732 */ 7733.LOP_INSTANCE_OF_store: 7734 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7735 SET_VREG(r0, r9) @ vA<- r0 7736 GET_INST_OPCODE(ip) @ extract opcode from rINST 7737 GOTO_OPCODE(ip) @ jump to next instruction 7738 7739 /* 7740 * Trivial test succeeded, save and bail. 7741 * r9 holds A 7742 */ 7743.LOP_INSTANCE_OF_trivial: 7744 mov r0, #1 @ indicate success 7745 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper 7746 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7747 SET_VREG(r0, r9) @ vA<- r0 7748 GET_INST_OPCODE(ip) @ extract opcode from rINST 7749 GOTO_OPCODE(ip) @ jump to next instruction 7750 7751 /* 7752 * Resolution required. This is the least-likely path. 7753 * 7754 * r3 holds BBBB 7755 * r9 holds A 7756 */ 7757.LOP_INSTANCE_OF_resolve: 7758 EXPORT_PC() @ resolve() could throw 7759 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 7760 mov r1, r3 @ r1<- BBBB 7761 mov r2, #1 @ r2<- true 7762 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 7763 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7764 cmp r0, #0 @ got null? 
7765 beq common_exceptionThrown @ yes, handle exception 7766 mov r1, r0 @ r1<- class resolved from BBB 7767 mov r3, rINST, lsr #12 @ r3<- B 7768 GET_VREG(r0, r3) @ r0<- vB (object) 7769 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 7770 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 7771 7772 7773/* continuation for OP_NEW_INSTANCE */ 7774 7775 .balign 32 @ minimize cache lines 7776.LOP_NEW_INSTANCE_finish: @ r0=new object 7777 mov r3, rINST, lsr #8 @ r3<- AA 7778 cmp r0, #0 @ failed? 7779 beq common_exceptionThrown @ yes, handle the exception 7780 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7781 GET_INST_OPCODE(ip) @ extract opcode from rINST 7782 SET_VREG(r0, r3) @ vAA<- r0 7783 GOTO_OPCODE(ip) @ jump to next instruction 7784 7785 /* 7786 * Class initialization required. 7787 * 7788 * r0 holds class object 7789 */ 7790.LOP_NEW_INSTANCE_needinit: 7791 mov r9, r0 @ save r0 7792 bl dvmInitClass @ initialize class 7793 cmp r0, #0 @ check boolean result 7794 mov r0, r9 @ restore r0 7795 bne .LOP_NEW_INSTANCE_initialized @ success, continue 7796 b common_exceptionThrown @ failed, deal with init exception 7797 7798 /* 7799 * Resolution required. This is the least-likely path. 7800 * 7801 * r1 holds BBBB 7802 */ 7803.LOP_NEW_INSTANCE_resolve: 7804 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7805 mov r2, #0 @ r2<- false 7806 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7807 bl dvmResolveClass @ r0<- resolved ClassObject ptr 7808 cmp r0, #0 @ got null? 7809 bne .LOP_NEW_INSTANCE_resolved @ no, continue 7810 b common_exceptionThrown @ yes, handle exception 7811 7812.LstrInstantiationErrorPtr: 7813 .word .LstrInstantiationError 7814 7815 7816/* continuation for OP_NEW_ARRAY */ 7817 7818 7819 /* 7820 * Resolve class. (This is an uncommon case.) 
7821 * 7822 * r1 holds array length 7823 * r2 holds class ref CCCC 7824 */ 7825.LOP_NEW_ARRAY_resolve: 7826 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 7827 mov r9, r1 @ r9<- length (save) 7828 mov r1, r2 @ r1<- CCCC 7829 mov r2, #0 @ r2<- false 7830 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 7831 bl dvmResolveClass @ r0<- call(clazz, ref) 7832 cmp r0, #0 @ got null? 7833 mov r1, r9 @ r1<- length (restore) 7834 beq common_exceptionThrown @ yes, handle exception 7835 @ fall through to OP_NEW_ARRAY_finish 7836 7837 /* 7838 * Finish allocation. 7839 * 7840 * r0 holds class 7841 * r1 holds array length 7842 */ 7843.LOP_NEW_ARRAY_finish: 7844 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 7845 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 7846 cmp r0, #0 @ failed? 7847 mov r2, rINST, lsr #8 @ r2<- A+ 7848 beq common_exceptionThrown @ yes, handle the exception 7849 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7850 and r2, r2, #15 @ r2<- A 7851 GET_INST_OPCODE(ip) @ extract opcode from rINST 7852 SET_VREG(r0, r2) @ vA<- r0 7853 GOTO_OPCODE(ip) @ jump to next instruction 7854 7855 7856/* continuation for OP_FILLED_NEW_ARRAY */ 7857 7858 /* 7859 * On entry: 7860 * r0 holds array class 7861 * r10 holds AA or BA 7862 */ 7863.LOP_FILLED_NEW_ARRAY_continue: 7864 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 7865 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 7866 ldrb r3, [r3, #1] @ r3<- descriptor[1] 7867 .if 0 7868 mov r1, r10 @ r1<- AA (length) 7869 .else 7870 mov r1, r10, lsr #4 @ r1<- B (length) 7871 .endif 7872 cmp r3, #'I' @ array of ints? 7873 cmpne r3, #'L' @ array of objects? 7874 cmpne r3, #'[' @ array of arrays? 7875 mov r9, r1 @ save length in r9 7876 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 7877 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 7878 cmp r0, #0 @ null return? 
7879 beq common_exceptionThrown @ alloc failed, handle exception 7880 7881 FETCH(r1, 2) @ r1<- FEDC or CCCC 7882 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 7883 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 7884 subs r9, r9, #1 @ length--, check for neg 7885 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7886 bmi 2f @ was zero, bail 7887 7888 @ copy values from registers into the array 7889 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 7890 .if 0 7891 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 78921: ldr r3, [r2], #4 @ r3<- *r2++ 7893 subs r9, r9, #1 @ count-- 7894 str r3, [r0], #4 @ *contents++ = vX 7895 bpl 1b 7896 @ continue at 2 7897 .else 7898 cmp r9, #4 @ length was initially 5? 7899 and r2, r10, #15 @ r2<- A 7900 bne 1f @ <= 4 args, branch 7901 GET_VREG(r3, r2) @ r3<- vA 7902 sub r9, r9, #1 @ count-- 7903 str r3, [r0, #16] @ contents[4] = vA 79041: and r2, r1, #15 @ r2<- F/E/D/C 7905 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 7906 mov r1, r1, lsr #4 @ r1<- next reg in low 4 7907 subs r9, r9, #1 @ count-- 7908 str r3, [r0], #4 @ *contents++ = vX 7909 bpl 1b 7910 @ continue at 2 7911 .endif 7912 79132: 7914 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7915 GOTO_OPCODE(ip) @ execute it 7916 7917 /* 7918 * Throw an exception indicating that we have not implemented this 7919 * mode of filled-new-array. 
7920 */ 7921.LOP_FILLED_NEW_ARRAY_notimpl: 7922 ldr r0, .L_strInternalError 7923 ldr r1, .L_strFilledNewArrayNotImpl 7924 bl dvmThrowException 7925 b common_exceptionThrown 7926 7927 .if (!0) @ define in one or the other, not both 7928.L_strFilledNewArrayNotImpl: 7929 .word .LstrFilledNewArrayNotImpl 7930.L_strInternalError: 7931 .word .LstrInternalError 7932 .endif 7933 7934 7935/* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 7936 7937 /* 7938 * On entry: 7939 * r0 holds array class 7940 * r10 holds AA or BA 7941 */ 7942.LOP_FILLED_NEW_ARRAY_RANGE_continue: 7943 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 7944 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 7945 ldrb r3, [r3, #1] @ r3<- descriptor[1] 7946 .if 1 7947 mov r1, r10 @ r1<- AA (length) 7948 .else 7949 mov r1, r10, lsr #4 @ r1<- B (length) 7950 .endif 7951 cmp r3, #'I' @ array of ints? 7952 cmpne r3, #'L' @ array of objects? 7953 cmpne r3, #'[' @ array of arrays? 7954 mov r9, r1 @ save length in r9 7955 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 7956 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 7957 cmp r0, #0 @ null return? 7958 beq common_exceptionThrown @ alloc failed, handle exception 7959 7960 FETCH(r1, 2) @ r1<- FEDC or CCCC 7961 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 7962 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 7963 subs r9, r9, #1 @ length--, check for neg 7964 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 7965 bmi 2f @ was zero, bail 7966 7967 @ copy values from registers into the array 7968 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 7969 .if 1 7970 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 79711: ldr r3, [r2], #4 @ r3<- *r2++ 7972 subs r9, r9, #1 @ count-- 7973 str r3, [r0], #4 @ *contents++ = vX 7974 bpl 1b 7975 @ continue at 2 7976 .else 7977 cmp r9, #4 @ length was initially 5? 
7978 and r2, r10, #15 @ r2<- A 7979 bne 1f @ <= 4 args, branch 7980 GET_VREG(r3, r2) @ r3<- vA 7981 sub r9, r9, #1 @ count-- 7982 str r3, [r0, #16] @ contents[4] = vA 79831: and r2, r1, #15 @ r2<- F/E/D/C 7984 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 7985 mov r1, r1, lsr #4 @ r1<- next reg in low 4 7986 subs r9, r9, #1 @ count-- 7987 str r3, [r0], #4 @ *contents++ = vX 7988 bpl 1b 7989 @ continue at 2 7990 .endif 7991 79922: 7993 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 7994 GOTO_OPCODE(ip) @ execute it 7995 7996 /* 7997 * Throw an exception indicating that we have not implemented this 7998 * mode of filled-new-array. 7999 */ 8000.LOP_FILLED_NEW_ARRAY_RANGE_notimpl: 8001 ldr r0, .L_strInternalError 8002 ldr r1, .L_strFilledNewArrayNotImpl 8003 bl dvmThrowException 8004 b common_exceptionThrown 8005 8006 .if (!1) @ define in one or the other, not both 8007.L_strFilledNewArrayNotImpl: 8008 .word .LstrFilledNewArrayNotImpl 8009.L_strInternalError: 8010 .word .LstrInternalError 8011 .endif 8012 8013 8014/* continuation for OP_CMPL_FLOAT */ 8015.LOP_CMPL_FLOAT_finish: 8016 SET_VREG(r0, r9) @ vAA<- r0 8017 GOTO_OPCODE(ip) @ jump to next instruction 8018 8019 8020/* continuation for OP_CMPG_FLOAT */ 8021.LOP_CMPG_FLOAT_finish: 8022 SET_VREG(r0, r9) @ vAA<- r0 8023 GOTO_OPCODE(ip) @ jump to next instruction 8024 8025 8026/* continuation for OP_CMPL_DOUBLE */ 8027.LOP_CMPL_DOUBLE_finish: 8028 SET_VREG(r0, r9) @ vAA<- r0 8029 GOTO_OPCODE(ip) @ jump to next instruction 8030 8031 8032/* continuation for OP_CMPG_DOUBLE */ 8033.LOP_CMPG_DOUBLE_finish: 8034 SET_VREG(r0, r9) @ vAA<- r0 8035 GOTO_OPCODE(ip) @ jump to next instruction 8036 8037 8038/* continuation for OP_CMP_LONG */ 8039 8040.LOP_CMP_LONG_less: 8041 mvn r1, #0 @ r1<- -1 8042 @ Want to cond code the next mov so we can avoid branch, but don't see it; 8043 @ instead, we just replicate the tail end. 
8044 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8045 SET_VREG(r1, r9) @ vAA<- r1 8046 GET_INST_OPCODE(ip) @ extract opcode from rINST 8047 GOTO_OPCODE(ip) @ jump to next instruction 8048 8049.LOP_CMP_LONG_greater: 8050 mov r1, #1 @ r1<- 1 8051 @ fall through to _finish 8052 8053.LOP_CMP_LONG_finish: 8054 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8055 SET_VREG(r1, r9) @ vAA<- r1 8056 GET_INST_OPCODE(ip) @ extract opcode from rINST 8057 GOTO_OPCODE(ip) @ jump to next instruction 8058 8059 8060/* continuation for OP_AGET_WIDE */ 8061 8062.LOP_AGET_WIDE_finish: 8063 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8064 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8065 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8066 GET_INST_OPCODE(ip) @ extract opcode from rINST 8067 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8068 GOTO_OPCODE(ip) @ jump to next instruction 8069 8070 8071/* continuation for OP_APUT_WIDE */ 8072 8073.LOP_APUT_WIDE_finish: 8074 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8075 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8076 GET_INST_OPCODE(ip) @ extract opcode from rINST 8077 strd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8078 GOTO_OPCODE(ip) @ jump to next instruction 8079 8080 8081/* continuation for OP_APUT_OBJECT */ 8082 /* 8083 * On entry: 8084 * r1 = vBB (arrayObj) 8085 * r9 = vAA (obj) 8086 * r10 = offset into array (vBB + vCC * width) 8087 */ 8088.LOP_APUT_OBJECT_finish: 8089 cmp r9, #0 @ storing null reference? 8090 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8091 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8092 ldr r1, [r1, #offObject_clazz] @ r1<- arrayObj->clazz 8093 bl dvmCanPutArrayElement @ test object type vs. array type 8094 cmp r0, #0 @ okay? 
8095 beq common_errArrayStore @ no 8096.LOP_APUT_OBJECT_skip_check: 8097 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8098 GET_INST_OPCODE(ip) @ extract opcode from rINST 8099 str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA 8100 GOTO_OPCODE(ip) @ jump to next instruction 8101 8102 8103/* continuation for OP_IGET */ 8104 8105 /* 8106 * Currently: 8107 * r0 holds resolved field 8108 * r9 holds object 8109 */ 8110.LOP_IGET_finish: 8111 @bl common_squeak0 8112 cmp r9, #0 @ check object for null 8113 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8114 beq common_errNullObject @ object was null 8115 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8116 ubfx r2, rINST, #8, #4 @ r2<- A 8117 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8118 GET_INST_OPCODE(ip) @ extract opcode from rINST 8119 SET_VREG(r0, r2) @ fp[A]<- r0 8120 GOTO_OPCODE(ip) @ jump to next instruction 8121 8122 8123/* continuation for OP_IGET_WIDE */ 8124 8125 /* 8126 * Currently: 8127 * r0 holds resolved field 8128 * r9 holds object 8129 */ 8130.LOP_IGET_WIDE_finish: 8131 cmp r9, #0 @ check object for null 8132 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8133 beq common_errNullObject @ object was null 8134 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 8135 ubfx r2, rINST, #8, #4 @ r2<- A 8136 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8137 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8138 GET_INST_OPCODE(ip) @ extract opcode from rINST 8139 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8140 GOTO_OPCODE(ip) @ jump to next instruction 8141 8142 8143/* continuation for OP_IGET_OBJECT */ 8144 8145 /* 8146 * Currently: 8147 * r0 holds resolved field 8148 * r9 holds object 8149 */ 8150.LOP_IGET_OBJECT_finish: 8151 @bl common_squeak0 8152 cmp r9, #0 @ check object for null 8153 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8154 beq common_errNullObject @ object was null 8155 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8156 mov r2, 
rINST, lsr #8 @ r2<- A+ 8157 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8158 and r2, r2, #15 @ r2<- A 8159 GET_INST_OPCODE(ip) @ extract opcode from rINST 8160 SET_VREG(r0, r2) @ fp[A]<- r0 8161 GOTO_OPCODE(ip) @ jump to next instruction 8162 8163 8164/* continuation for OP_IGET_BOOLEAN */ 8165 8166 /* 8167 * Currently: 8168 * r0 holds resolved field 8169 * r9 holds object 8170 */ 8171.LOP_IGET_BOOLEAN_finish: 8172 @bl common_squeak1 8173 cmp r9, #0 @ check object for null 8174 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8175 beq common_errNullObject @ object was null 8176 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8177 mov r2, rINST, lsr #8 @ r2<- A+ 8178 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8179 and r2, r2, #15 @ r2<- A 8180 GET_INST_OPCODE(ip) @ extract opcode from rINST 8181 SET_VREG(r0, r2) @ fp[A]<- r0 8182 GOTO_OPCODE(ip) @ jump to next instruction 8183 8184 8185/* continuation for OP_IGET_BYTE */ 8186 8187 /* 8188 * Currently: 8189 * r0 holds resolved field 8190 * r9 holds object 8191 */ 8192.LOP_IGET_BYTE_finish: 8193 @bl common_squeak2 8194 cmp r9, #0 @ check object for null 8195 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8196 beq common_errNullObject @ object was null 8197 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8198 mov r2, rINST, lsr #8 @ r2<- A+ 8199 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8200 and r2, r2, #15 @ r2<- A 8201 GET_INST_OPCODE(ip) @ extract opcode from rINST 8202 SET_VREG(r0, r2) @ fp[A]<- r0 8203 GOTO_OPCODE(ip) @ jump to next instruction 8204 8205 8206/* continuation for OP_IGET_CHAR */ 8207 8208 /* 8209 * Currently: 8210 * r0 holds resolved field 8211 * r9 holds object 8212 */ 8213.LOP_IGET_CHAR_finish: 8214 @bl common_squeak3 8215 cmp r9, #0 @ check object for null 8216 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8217 beq common_errNullObject @ object was null 8218 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8219 mov r2, 
rINST, lsr #8 @ r2<- A+ 8220 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8221 and r2, r2, #15 @ r2<- A 8222 GET_INST_OPCODE(ip) @ extract opcode from rINST 8223 SET_VREG(r0, r2) @ fp[A]<- r0 8224 GOTO_OPCODE(ip) @ jump to next instruction 8225 8226 8227/* continuation for OP_IGET_SHORT */ 8228 8229 /* 8230 * Currently: 8231 * r0 holds resolved field 8232 * r9 holds object 8233 */ 8234.LOP_IGET_SHORT_finish: 8235 @bl common_squeak4 8236 cmp r9, #0 @ check object for null 8237 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8238 beq common_errNullObject @ object was null 8239 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8240 mov r2, rINST, lsr #8 @ r2<- A+ 8241 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8242 and r2, r2, #15 @ r2<- A 8243 GET_INST_OPCODE(ip) @ extract opcode from rINST 8244 SET_VREG(r0, r2) @ fp[A]<- r0 8245 GOTO_OPCODE(ip) @ jump to next instruction 8246 8247 8248/* continuation for OP_IPUT */ 8249 8250 /* 8251 * Currently: 8252 * r0 holds resolved field 8253 * r9 holds object 8254 */ 8255.LOP_IPUT_finish: 8256 @bl common_squeak0 8257 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8258 ubfx r1, rINST, #8, #4 @ r1<- A 8259 cmp r9, #0 @ check object for null 8260 GET_VREG(r0, r1) @ r0<- fp[A] 8261 beq common_errNullObject @ object was null 8262 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8263 GET_INST_OPCODE(ip) @ extract opcode from rINST 8264 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8265 GOTO_OPCODE(ip) @ jump to next instruction 8266 8267 8268/* continuation for OP_IPUT_WIDE */ 8269 8270 /* 8271 * Currently: 8272 * r0 holds resolved field 8273 * r9 holds object 8274 */ 8275.LOP_IPUT_WIDE_finish: 8276 ubfx r2, rINST, #8, #4 @ r2<- A 8277 cmp r9, #0 @ check object for null 8278 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8279 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8280 beq common_errNullObject @ object was null 8281 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 
8282 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8283 GET_INST_OPCODE(ip) @ extract opcode from rINST 8284 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0 8285 GOTO_OPCODE(ip) @ jump to next instruction 8286 8287 8288/* continuation for OP_IPUT_OBJECT */ 8289 8290 /* 8291 * Currently: 8292 * r0 holds resolved field 8293 * r9 holds object 8294 */ 8295.LOP_IPUT_OBJECT_finish: 8296 @bl common_squeak0 8297 mov r1, rINST, lsr #8 @ r1<- A+ 8298 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8299 and r1, r1, #15 @ r1<- A 8300 cmp r9, #0 @ check object for null 8301 GET_VREG(r0, r1) @ r0<- fp[A] 8302 beq common_errNullObject @ object was null 8303 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8304 GET_INST_OPCODE(ip) @ extract opcode from rINST 8305 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8306 GOTO_OPCODE(ip) @ jump to next instruction 8307 8308 8309/* continuation for OP_IPUT_BOOLEAN */ 8310 8311 /* 8312 * Currently: 8313 * r0 holds resolved field 8314 * r9 holds object 8315 */ 8316.LOP_IPUT_BOOLEAN_finish: 8317 @bl common_squeak1 8318 mov r1, rINST, lsr #8 @ r1<- A+ 8319 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8320 and r1, r1, #15 @ r1<- A 8321 cmp r9, #0 @ check object for null 8322 GET_VREG(r0, r1) @ r0<- fp[A] 8323 beq common_errNullObject @ object was null 8324 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8325 GET_INST_OPCODE(ip) @ extract opcode from rINST 8326 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8327 GOTO_OPCODE(ip) @ jump to next instruction 8328 8329 8330/* continuation for OP_IPUT_BYTE */ 8331 8332 /* 8333 * Currently: 8334 * r0 holds resolved field 8335 * r9 holds object 8336 */ 8337.LOP_IPUT_BYTE_finish: 8338 @bl common_squeak2 8339 mov r1, rINST, lsr #8 @ r1<- A+ 8340 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8341 and r1, r1, #15 @ r1<- A 8342 cmp r9, #0 @ check object for null 8343 GET_VREG(r0, r1) @ r0<- fp[A] 8344 beq common_errNullObject @ object was null 8345 
FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8346 GET_INST_OPCODE(ip) @ extract opcode from rINST 8347 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8348 GOTO_OPCODE(ip) @ jump to next instruction 8349 8350 8351/* continuation for OP_IPUT_CHAR */ 8352 8353 /* 8354 * Currently: 8355 * r0 holds resolved field 8356 * r9 holds object 8357 */ 8358.LOP_IPUT_CHAR_finish: 8359 @bl common_squeak3 8360 mov r1, rINST, lsr #8 @ r1<- A+ 8361 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8362 and r1, r1, #15 @ r1<- A 8363 cmp r9, #0 @ check object for null 8364 GET_VREG(r0, r1) @ r0<- fp[A] 8365 beq common_errNullObject @ object was null 8366 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8367 GET_INST_OPCODE(ip) @ extract opcode from rINST 8368 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8369 GOTO_OPCODE(ip) @ jump to next instruction 8370 8371 8372/* continuation for OP_IPUT_SHORT */ 8373 8374 /* 8375 * Currently: 8376 * r0 holds resolved field 8377 * r9 holds object 8378 */ 8379.LOP_IPUT_SHORT_finish: 8380 @bl common_squeak4 8381 mov r1, rINST, lsr #8 @ r1<- A+ 8382 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8383 and r1, r1, #15 @ r1<- A 8384 cmp r9, #0 @ check object for null 8385 GET_VREG(r0, r1) @ r0<- fp[A] 8386 beq common_errNullObject @ object was null 8387 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8388 GET_INST_OPCODE(ip) @ extract opcode from rINST 8389 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8390 GOTO_OPCODE(ip) @ jump to next instruction 8391 8392 8393/* continuation for OP_SGET */ 8394 8395 /* 8396 * Continuation if the field has not yet been resolved. 8397 * r1: BBBB field ref 8398 */ 8399.LOP_SGET_resolve: 8400 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8401 EXPORT_PC() @ resolve() could throw, so export now 8402 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8403 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8404 cmp r0, #0 @ success? 
8405 bne .LOP_SGET_finish @ yes, finish 8406 b common_exceptionThrown @ no, handle exception 8407 8408 8409/* continuation for OP_SGET_WIDE */ 8410 8411 /* 8412 * Continuation if the field has not yet been resolved. 8413 * r1: BBBB field ref 8414 */ 8415.LOP_SGET_WIDE_resolve: 8416 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8417 EXPORT_PC() @ resolve() could throw, so export now 8418 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8419 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8420 cmp r0, #0 @ success? 8421 bne .LOP_SGET_WIDE_finish @ yes, finish 8422 b common_exceptionThrown @ no, handle exception 8423 8424 8425/* continuation for OP_SGET_OBJECT */ 8426 8427 /* 8428 * Continuation if the field has not yet been resolved. 8429 * r1: BBBB field ref 8430 */ 8431.LOP_SGET_OBJECT_resolve: 8432 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8433 EXPORT_PC() @ resolve() could throw, so export now 8434 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8435 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8436 cmp r0, #0 @ success? 8437 bne .LOP_SGET_OBJECT_finish @ yes, finish 8438 b common_exceptionThrown @ no, handle exception 8439 8440 8441/* continuation for OP_SGET_BOOLEAN */ 8442 8443 /* 8444 * Continuation if the field has not yet been resolved. 8445 * r1: BBBB field ref 8446 */ 8447.LOP_SGET_BOOLEAN_resolve: 8448 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8449 EXPORT_PC() @ resolve() could throw, so export now 8450 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8451 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8452 cmp r0, #0 @ success? 8453 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 8454 b common_exceptionThrown @ no, handle exception 8455 8456 8457/* continuation for OP_SGET_BYTE */ 8458 8459 /* 8460 * Continuation if the field has not yet been resolved. 
8461 * r1: BBBB field ref 8462 */ 8463.LOP_SGET_BYTE_resolve: 8464 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8465 EXPORT_PC() @ resolve() could throw, so export now 8466 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8467 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8468 cmp r0, #0 @ success? 8469 bne .LOP_SGET_BYTE_finish @ yes, finish 8470 b common_exceptionThrown @ no, handle exception 8471 8472 8473/* continuation for OP_SGET_CHAR */ 8474 8475 /* 8476 * Continuation if the field has not yet been resolved. 8477 * r1: BBBB field ref 8478 */ 8479.LOP_SGET_CHAR_resolve: 8480 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8481 EXPORT_PC() @ resolve() could throw, so export now 8482 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8483 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8484 cmp r0, #0 @ success? 8485 bne .LOP_SGET_CHAR_finish @ yes, finish 8486 b common_exceptionThrown @ no, handle exception 8487 8488 8489/* continuation for OP_SGET_SHORT */ 8490 8491 /* 8492 * Continuation if the field has not yet been resolved. 8493 * r1: BBBB field ref 8494 */ 8495.LOP_SGET_SHORT_resolve: 8496 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8497 EXPORT_PC() @ resolve() could throw, so export now 8498 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8499 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8500 cmp r0, #0 @ success? 8501 bne .LOP_SGET_SHORT_finish @ yes, finish 8502 b common_exceptionThrown @ no, handle exception 8503 8504 8505/* continuation for OP_SPUT */ 8506 8507 /* 8508 * Continuation if the field has not yet been resolved. 8509 * r1: BBBB field ref 8510 */ 8511.LOP_SPUT_resolve: 8512 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8513 EXPORT_PC() @ resolve() could throw, so export now 8514 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8515 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8516 cmp r0, #0 @ success? 
8517 bne .LOP_SPUT_finish @ yes, finish 8518 b common_exceptionThrown @ no, handle exception 8519 8520 8521/* continuation for OP_SPUT_WIDE */ 8522 8523 /* 8524 * Continuation if the field has not yet been resolved. 8525 * r1: BBBB field ref 8526 * r9: &fp[AA] 8527 */ 8528.LOP_SPUT_WIDE_resolve: 8529 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8530 EXPORT_PC() @ resolve() could throw, so export now 8531 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8532 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8533 cmp r0, #0 @ success? 8534 bne .LOP_SPUT_WIDE_finish @ yes, finish 8535 b common_exceptionThrown @ no, handle exception 8536 8537 8538/* continuation for OP_SPUT_OBJECT */ 8539 8540 /* 8541 * Continuation if the field has not yet been resolved. 8542 * r1: BBBB field ref 8543 */ 8544.LOP_SPUT_OBJECT_resolve: 8545 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8546 EXPORT_PC() @ resolve() could throw, so export now 8547 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8548 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8549 cmp r0, #0 @ success? 8550 bne .LOP_SPUT_OBJECT_finish @ yes, finish 8551 b common_exceptionThrown @ no, handle exception 8552 8553 8554/* continuation for OP_SPUT_BOOLEAN */ 8555 8556 /* 8557 * Continuation if the field has not yet been resolved. 8558 * r1: BBBB field ref 8559 */ 8560.LOP_SPUT_BOOLEAN_resolve: 8561 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8562 EXPORT_PC() @ resolve() could throw, so export now 8563 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8564 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8565 cmp r0, #0 @ success? 8566 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 8567 b common_exceptionThrown @ no, handle exception 8568 8569 8570/* continuation for OP_SPUT_BYTE */ 8571 8572 /* 8573 * Continuation if the field has not yet been resolved. 
8574 * r1: BBBB field ref 8575 */ 8576.LOP_SPUT_BYTE_resolve: 8577 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8578 EXPORT_PC() @ resolve() could throw, so export now 8579 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8580 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8581 cmp r0, #0 @ success? 8582 bne .LOP_SPUT_BYTE_finish @ yes, finish 8583 b common_exceptionThrown @ no, handle exception 8584 8585 8586/* continuation for OP_SPUT_CHAR */ 8587 8588 /* 8589 * Continuation if the field has not yet been resolved. 8590 * r1: BBBB field ref 8591 */ 8592.LOP_SPUT_CHAR_resolve: 8593 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8594 EXPORT_PC() @ resolve() could throw, so export now 8595 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8596 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8597 cmp r0, #0 @ success? 8598 bne .LOP_SPUT_CHAR_finish @ yes, finish 8599 b common_exceptionThrown @ no, handle exception 8600 8601 8602/* continuation for OP_SPUT_SHORT */ 8603 8604 /* 8605 * Continuation if the field has not yet been resolved. 8606 * r1: BBBB field ref 8607 */ 8608.LOP_SPUT_SHORT_resolve: 8609 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8610 EXPORT_PC() @ resolve() could throw, so export now 8611 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8612 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8613 cmp r0, #0 @ success? 8614 bne .LOP_SPUT_SHORT_finish @ yes, finish 8615 b common_exceptionThrown @ no, handle exception 8616 8617 8618/* continuation for OP_INVOKE_VIRTUAL */ 8619 8620 /* 8621 * At this point: 8622 * r0 = resolved base method 8623 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 8624 */ 8625.LOP_INVOKE_VIRTUAL_continue: 8626 GET_VREG(r1, r10) @ r1<- "this" ptr 8627 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8628 cmp r1, #0 @ is "this" null? 
 8629 beq common_errNullObject @ null "this", throw exception 8630 ldr r3, [r1, #offObject_clazz] @ r3<- thisPtr->clazz 8631 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 8632 ldr r0, [r3, r2, lsl #2] @ r0<- vtable[methodIndex] 8633 bl common_invokeMethodNoRange @ continue on 8634 8635 8636/* continuation for OP_INVOKE_SUPER */ 8637 8638 /* 8639 * At this point: 8640 * r0 = resolved base method 8641 * r9 = method->clazz 8642 */ 8643.LOP_INVOKE_SUPER_continue: 8644 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 8645 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8646 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 8647 EXPORT_PC() @ must export for invoke 8648 cmp r2, r3 @ compare (methodIndex, vtableCount) 8649 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass 8650 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 8651 ldr r0, [r1, r2, lsl #2] @ r0<- vtable[methodIndex] 8652 bl common_invokeMethodNoRange @ continue on 8653 8654.LOP_INVOKE_SUPER_resolve: 8655 mov r0, r9 @ r0<- method->clazz 8656 mov r2, #METHOD_VIRTUAL @ resolver method type 8657 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8658 cmp r0, #0 @ got null? 8659 bne .LOP_INVOKE_SUPER_continue @ no, continue 8660 b common_exceptionThrown @ yes, handle exception 8661 8662 /* 8663 * Throw a NoSuchMethodError with the method name as the message. 
8664 * r0 = resolved base method 8665 */ 8666.LOP_INVOKE_SUPER_nsm: 8667 ldr r1, [r0, #offMethod_name] @ r1<- method name 8668 b common_errNoSuchMethod 8669 8670 8671/* continuation for OP_INVOKE_DIRECT */ 8672 8673 /* 8674 * On entry: 8675 * r1 = reference (BBBB or CCCC) 8676 * r10 = "this" register 8677 */ 8678.LOP_INVOKE_DIRECT_resolve: 8679 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8680 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8681 mov r2, #METHOD_DIRECT @ resolver method type 8682 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8683 cmp r0, #0 @ got null? 8684 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 8685 bne .LOP_INVOKE_DIRECT_finish @ no, continue 8686 b common_exceptionThrown @ yes, handle exception 8687 8688 8689/* continuation for OP_INVOKE_VIRTUAL_RANGE */ 8690 8691 /* 8692 * At this point: 8693 * r0 = resolved base method 8694 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 8695 */ 8696.LOP_INVOKE_VIRTUAL_RANGE_continue: 8697 GET_VREG(r1, r10) @ r1<- "this" ptr 8698 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8699 cmp r1, #0 @ is "this" null? 
8700 beq common_errNullObject @ null "this", throw exception 8701 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 8702 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 8703 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 8704 bl common_invokeMethodRange @ continue on 8705 8706 8707/* continuation for OP_INVOKE_SUPER_RANGE */ 8708 8709 /* 8710 * At this point: 8711 * r0 = resolved base method 8712 * r9 = method->clazz 8713 */ 8714.LOP_INVOKE_SUPER_RANGE_continue: 8715 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 8716 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 8717 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 8718 EXPORT_PC() @ must export for invoke 8719 cmp r2, r3 @ compare (methodIndex, vtableCount) 8720 bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass 8721 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 8722 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 8723 bl common_invokeMethodRange @ continue on 8724 8725.LOP_INVOKE_SUPER_RANGE_resolve: 8726 mov r0, r9 @ r0<- method->clazz 8727 mov r2, #METHOD_VIRTUAL @ resolver method type 8728 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8729 cmp r0, #0 @ got null? 8730 bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue 8731 b common_exceptionThrown @ yes, handle exception 8732 8733 /* 8734 * Throw a NoSuchMethodError with the method name as the message. 
8735 * r0 = resolved base method 8736 */ 8737.LOP_INVOKE_SUPER_RANGE_nsm: 8738 ldr r1, [r0, #offMethod_name] @ r1<- method name 8739 b common_errNoSuchMethod 8740 8741 8742/* continuation for OP_INVOKE_DIRECT_RANGE */ 8743 8744 /* 8745 * On entry: 8746 * r1 = reference (BBBB or CCCC) 8747 * r10 = "this" register 8748 */ 8749.LOP_INVOKE_DIRECT_RANGE_resolve: 8750 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8751 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8752 mov r2, #METHOD_DIRECT @ resolver method type 8753 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 8754 cmp r0, #0 @ got null? 8755 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 8756 bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue 8757 b common_exceptionThrown @ yes, handle exception 8758 8759 8760/* continuation for OP_FLOAT_TO_LONG */ 8761/* 8762 * Convert the float in r0 to a long in r0/r1. 8763 * 8764 * We have to clip values to long min/max per the specification. The 8765 * expected common case is a "reasonable" value that converts directly 8766 * to modest integer. The EABI convert function isn't doing this for us. 8767 */ 8768f2l_doconv: 8769 stmfd sp!, {r4, lr} 8770 mov r1, #0x5f000000 @ (float)maxlong 8771 mov r4, r0 8772 bl __aeabi_fcmpge @ is arg >= maxlong? 8773 cmp r0, #0 @ nonzero == yes 8774 mvnne r0, #0 @ return maxlong (7fffffff) 8775 mvnne r1, #0x80000000 8776 ldmnefd sp!, {r4, pc} 8777 8778 mov r0, r4 @ recover arg 8779 mov r1, #0xdf000000 @ (float)minlong 8780 bl __aeabi_fcmple @ is arg <= minlong? 8781 cmp r0, #0 @ nonzero == yes 8782 movne r0, #0 @ return minlong (80000000) 8783 movne r1, #0x80000000 8784 ldmnefd sp!, {r4, pc} 8785 8786 mov r0, r4 @ recover arg 8787 mov r1, r4 8788 bl __aeabi_fcmpeq @ is arg == self? 
8789 cmp r0, #0 @ zero == no 8790 moveq r1, #0 @ return zero for NaN 8791 ldmeqfd sp!, {r4, pc} 8792 8793 mov r0, r4 @ recover arg 8794 bl __aeabi_f2lz @ convert float to long 8795 ldmfd sp!, {r4, pc} 8796 8797 8798/* continuation for OP_DOUBLE_TO_LONG */ 8799/* 8800 * Convert the double in r0/r1 to a long in r0/r1. 8801 * 8802 * We have to clip values to long min/max per the specification. The 8803 * expected common case is a "reasonable" value that converts directly 8804 * to modest integer. The EABI convert function isn't doing this for us. 8805 */ 8806d2l_doconv: 8807 stmfd sp!, {r4, r5, lr} @ save regs 8808 mov r3, #0x43000000 @ maxlong, as a double (high word) 8809 add r3, #0x00e00000 @ 0x43e00000 8810 mov r2, #0 @ maxlong, as a double (low word) 8811 sub sp, sp, #4 @ align for EABI 8812 mov r4, r0 @ save a copy of r0 8813 mov r5, r1 @ and r1 8814 bl __aeabi_dcmpge @ is arg >= maxlong? 8815 cmp r0, #0 @ nonzero == yes 8816 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 8817 mvnne r1, #0x80000000 8818 bne 1f 8819 8820 mov r0, r4 @ recover arg 8821 mov r1, r5 8822 mov r3, #0xc3000000 @ minlong, as a double (high word) 8823 add r3, #0x00e00000 @ 0xc3e00000 8824 mov r2, #0 @ minlong, as a double (low word) 8825 bl __aeabi_dcmple @ is arg <= minlong? 8826 cmp r0, #0 @ nonzero == yes 8827 movne r0, #0 @ return minlong (8000000000000000) 8828 movne r1, #0x80000000 8829 bne 1f 8830 8831 mov r0, r4 @ recover arg 8832 mov r1, r5 8833 mov r2, r4 @ compare against self 8834 mov r3, r5 8835 bl __aeabi_dcmpeq @ is arg == self? 
8836 cmp r0, #0 @ zero == no 8837 moveq r1, #0 @ return zero for NaN 8838 beq 1f 8839 8840 mov r0, r4 @ recover arg 8841 mov r1, r5 8842 bl __aeabi_d2lz @ convert double to long 8843 88441: 8845 add sp, sp, #4 8846 ldmfd sp!, {r4, r5, pc} 8847 8848 8849/* continuation for OP_MUL_LONG */ 8850 8851.LOP_MUL_LONG_finish: 8852 GET_INST_OPCODE(ip) @ extract opcode from rINST 8853 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 8854 GOTO_OPCODE(ip) @ jump to next instruction 8855 8856 8857/* continuation for OP_SHL_LONG */ 8858 8859.LOP_SHL_LONG_finish: 8860 mov r0, r0, asl r2 @ r0<- r0 << r2 8861 GET_INST_OPCODE(ip) @ extract opcode from rINST 8862 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8863 GOTO_OPCODE(ip) @ jump to next instruction 8864 8865 8866/* continuation for OP_SHR_LONG */ 8867 8868.LOP_SHR_LONG_finish: 8869 mov r1, r1, asr r2 @ r1<- r1 >> r2 8870 GET_INST_OPCODE(ip) @ extract opcode from rINST 8871 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8872 GOTO_OPCODE(ip) @ jump to next instruction 8873 8874 8875/* continuation for OP_USHR_LONG */ 8876 8877.LOP_USHR_LONG_finish: 8878 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 8879 GET_INST_OPCODE(ip) @ extract opcode from rINST 8880 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8881 GOTO_OPCODE(ip) @ jump to next instruction 8882 8883 8884/* continuation for OP_SHL_LONG_2ADDR */ 8885 8886.LOP_SHL_LONG_2ADDR_finish: 8887 GET_INST_OPCODE(ip) @ extract opcode from rINST 8888 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8889 GOTO_OPCODE(ip) @ jump to next instruction 8890 8891 8892/* continuation for OP_SHR_LONG_2ADDR */ 8893 8894.LOP_SHR_LONG_2ADDR_finish: 8895 GET_INST_OPCODE(ip) @ extract opcode from rINST 8896 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8897 GOTO_OPCODE(ip) @ jump to next instruction 8898 8899 8900/* continuation for OP_USHR_LONG_2ADDR */ 8901 8902.LOP_USHR_LONG_2ADDR_finish: 8903 GET_INST_OPCODE(ip) @ extract opcode from rINST 8904 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 8905 GOTO_OPCODE(ip) @ jump to next instruction 8906 8907 8908/* continuation 
for OP_EXECUTE_INLINE */ 8909 8910 /* 8911 * Extract args, call function. 8912 * r0 = #of args (0-4) 8913 * r10 = call index 8914 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 8915 * 8916 * Other ideas: 8917 * - Use a jump table from the main piece to jump directly into the 8918 * AND/LDR pairs. Costs a data load, saves a branch. 8919 * - Have five separate pieces that do the loading, so we can work the 8920 * interleave a little better. Increases code size. 8921 */ 8922.LOP_EXECUTE_INLINE_continue: 8923 rsb r0, r0, #4 @ r0<- 4-r0 8924 FETCH(r9, 2) @ r9<- FEDC 8925 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 8926 bl common_abort @ (skipped due to ARM prefetch) 89274: and ip, r9, #0xf000 @ isolate F 8928 ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) 89293: and ip, r9, #0x0f00 @ isolate E 8930 ldr r2, [rFP, ip, lsr #6] @ r2<- vE 89312: and ip, r9, #0x00f0 @ isolate D 8932 ldr r1, [rFP, ip, lsr #2] @ r1<- vD 89331: and ip, r9, #0x000f @ isolate C 8934 ldr r0, [rFP, ip, lsl #2] @ r0<- vC 89350: 8936 ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation 8937 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 8938 @ (not reached) 8939 8940.LOP_EXECUTE_INLINE_table: 8941 .word gDvmInlineOpsTable 8942 8943 8944/* continuation for OP_EXECUTE_INLINE_RANGE */ 8945 8946 /* 8947 * Extract args, call function. 
8948 * r0 = #of args (0-4) 8949 * r10 = call index 8950 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 8951 */ 8952.LOP_EXECUTE_INLINE_RANGE_continue: 8953 rsb r0, r0, #4 @ r0<- 4-r0 8954 FETCH(r9, 2) @ r9<- CCCC 8955 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 8956 bl common_abort @ (skipped due to ARM prefetch) 89574: add ip, r9, #3 @ base+3 8958 GET_VREG(r3, ip) @ r3<- vBase[3] 89593: add ip, r9, #2 @ base+2 8960 GET_VREG(r2, ip) @ r2<- vBase[2] 89612: add ip, r9, #1 @ base+1 8962 GET_VREG(r1, ip) @ r1<- vBase[1] 89631: add ip, r9, #0 @ (nop) 8964 GET_VREG(r0, ip) @ r0<- vBase[0] 89650: 8966 ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation 8967 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 8968 @ (not reached) 8969 8970.LOP_EXECUTE_INLINE_RANGE_table: 8971 .word gDvmInlineOpsTable 8972 8973 8974 .size dvmAsmSisterStart, .-dvmAsmSisterStart 8975 .global dvmAsmSisterEnd 8976dvmAsmSisterEnd: 8977 8978/* File: armv5te/footer.S */ 8979 8980/* 8981 * =========================================================================== 8982 * Common subroutines and data 8983 * =========================================================================== 8984 */ 8985 8986 8987 8988 .text 8989 .align 2 8990 8991#if defined(WITH_JIT) 8992#if defined(WITH_SELF_VERIFICATION) 8993 .global dvmJitToInterpPunt 8994dvmJitToInterpPunt: 8995 mov r2,#kSVSPunt @ r2<- interpreter entry point 8996 b dvmJitSelfVerificationEnd @ doesn't return 8997 8998 .global dvmJitToInterpSingleStep 8999dvmJitToInterpSingleStep: 9000 mov r2,#kSVSSingleStep @ r2<- interpreter entry point 9001 b dvmJitSelfVerificationEnd @ doesn't return 9002 9003 .global dvmJitToTraceSelect 9004dvmJitToTraceSelect: 9005 ldr r0,[lr, #-1] @ pass our target PC 9006 mov r2,#kSVSTraceSelect @ r2<- interpreter entry point 9007 b dvmJitSelfVerificationEnd @ doesn't return 9008 9009 .global dvmJitToBackwardBranch 9010dvmJitToBackwardBranch: 9011 ldr r0,[lr, #-1] @ pass our 
target PC 9012 mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point 9013 b dvmJitSelfVerificationEnd @ doesn't return 9014 9015 .global dvmJitToInterpNormal 9016dvmJitToInterpNormal: 9017 ldr r0,[lr, #-1] @ pass our target PC 9018 mov r2,#kSVSNormal @ r2<- interpreter entry point 9019 b dvmJitSelfVerificationEnd @ doesn't return 9020 9021 .global dvmJitToInterpNoChain 9022dvmJitToInterpNoChain: 9023 mov r0,rPC @ pass our target PC 9024 mov r2,#kSVSNoChain @ r2<- interpreter entry point 9025 b dvmJitSelfVerificationEnd @ doesn't return 9026#else 9027/* 9028 * Return from the translation cache to the interpreter when the compiler is 9029 * having issues translating/executing a Dalvik instruction. We have to skip 9030 * the code cache lookup otherwise it is possible to indefinitely bounce 9031 * between the interpreter and the code cache if the instruction that fails 9032 * to be compiled happens to be at a trace start. 9033 */ 9034 .global dvmJitToInterpPunt 9035dvmJitToInterpPunt: 9036 mov rPC, r0 9037#ifdef EXIT_STATS 9038 mov r0,lr 9039 bl dvmBumpPunt; 9040#endif 9041 EXPORT_PC() 9042 adrl rIBASE, dvmAsmInstructionStart 9043 FETCH_INST() 9044 GET_INST_OPCODE(ip) 9045 GOTO_OPCODE(ip) 9046 9047/* 9048 * Return to the interpreter to handle a single instruction. 
9049 * On entry: 9050 * r0 <= PC 9051 * r1 <= PC of resume instruction 9052 * lr <= resume point in translation 9053 */ 9054 .global dvmJitToInterpSingleStep 9055dvmJitToInterpSingleStep: 9056 str lr,[rGLUE,#offGlue_jitResume] 9057 str r1,[rGLUE,#offGlue_jitResumePC] 9058 mov r1,#kInterpEntryInstr 9059 @ enum is 4 byte in aapcs-EABI 9060 str r1, [rGLUE, #offGlue_entryPoint] 9061 mov rPC,r0 9062 EXPORT_PC() 9063 adrl rIBASE, dvmAsmInstructionStart 9064 mov r2,#kJitSingleStep @ Ask for single step and then revert 9065 str r2,[rGLUE,#offGlue_jitState] 9066 mov r1,#1 @ set changeInterp to bail to debug interp 9067 b common_gotoBail 9068 9069 9070/* 9071 * Return from the translation cache and immediately request 9072 * a translation for the exit target. Commonly used following 9073 * invokes. 9074 */ 9075 .global dvmJitToTraceSelect 9076dvmJitToTraceSelect: 9077 ldr rPC,[lr, #-1] @ get our target PC 9078 add rINST,lr,#-5 @ save start of chain branch 9079 mov r0,rPC 9080 bl dvmJitGetCodeAddr @ Is there a translation? 9081 cmp r0,#0 9082 beq 2f 9083 mov r1,rINST 9084 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9085 mov r1, rPC @ arg1 of translation may need this 9086 mov lr, #0 @ in case target is HANDLER_INTERPRET 9087 cmp r0,#0 @ successful chain? 9088 bxne r0 @ continue native execution 9089 b toInterpreter @ didn't chain - resume with interpreter 9090 9091/* No translation, so request one if profiling isn't disabled*/ 90922: 9093 adrl rIBASE, dvmAsmInstructionStart 9094 GET_JIT_PROF_TABLE(r0) 9095 FETCH_INST() 9096 cmp r0, #0 9097 bne common_selectTrace 9098 GET_INST_OPCODE(ip) 9099 GOTO_OPCODE(ip) 9100 9101/* 9102 * Return from the translation cache to the interpreter. 9103 * The return was done with a BLX from thumb mode, and 9104 * the following 32-bit word contains the target rPC value. 9105 * Note that lr (r14) will have its low-order bit set to denote 9106 * its thumb-mode origin. 
9107 * 9108 * We'll need to stash our lr origin away, recover the new 9109 * target and then check to see if there is a translation available 9110 * for our new target. If so, we do a translation chain and 9111 * go back to native execution. Otherwise, it's back to the 9112 * interpreter (after treating this entry as a potential 9113 * trace start). 9114 */ 9115 .global dvmJitToInterpNormal 9116dvmJitToInterpNormal: 9117 ldr rPC,[lr, #-1] @ get our target PC 9118 add rINST,lr,#-5 @ save start of chain branch 9119#ifdef EXIT_STATS 9120 bl dvmBumpNormal 9121#endif 9122 mov r0,rPC 9123 bl dvmJitGetCodeAddr @ Is there a translation? 9124 cmp r0,#0 9125 beq toInterpreter @ go if not, otherwise do chain 9126 mov r1,rINST 9127 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 9128 mov r1, rPC @ arg1 of translation may need this 9129 mov lr, #0 @ in case target is HANDLER_INTERPRET 9130 cmp r0,#0 @ successful chain? 9131 bxne r0 @ continue native execution 9132 b toInterpreter @ didn't chain - resume with interpreter 9133 9134/* 9135 * Return from the translation cache to the interpreter to do method invocation. 9136 * Check if translation exists for the callee, but don't chain to it. 9137 */ 9138 .global dvmJitToInterpNoChain 9139dvmJitToInterpNoChain: 9140#ifdef EXIT_STATS 9141 bl dvmBumpNoChain 9142#endif 9143 mov r0,rPC 9144 bl dvmJitGetCodeAddr @ Is there a translation? 9145 mov r1, rPC @ arg1 of translation may need this 9146 mov lr, #0 @ in case target is HANDLER_INTERPRET 9147 cmp r0,#0 9148 bxne r0 @ continue native execution if so 9149#endif 9150 9151/* 9152 * No translation, restore interpreter regs and start interpreting. 9153 * rGLUE & rFP were preserved in the translated code, and rPC has 9154 * already been restored by the time we get here. We'll need to set 9155 * up rIBASE & rINST, and load the address of the JitTable into r0. 
9156 */ 9157toInterpreter: 9158 EXPORT_PC() 9159 adrl rIBASE, dvmAsmInstructionStart 9160 FETCH_INST() 9161 GET_JIT_PROF_TABLE(r0) 9162 @ NOTE: intended fallthrough 9163/* 9164 * Common code to update potential trace start counter, and initiate 9165 * a trace-build if appropriate. On entry, rPC should point to the 9166 * next instruction to execute, and rINST should be already loaded with 9167 * the next opcode word, and r0 holds a pointer to the jit profile 9168 * table (pJitProfTable). 9169 */ 9170common_testUpdateProfile: 9171 cmp r0,#0 9172 GET_INST_OPCODE(ip) 9173 GOTO_OPCODE_IFEQ(ip) @ if not profiling, fallthrough otherwise */ 9174 9175common_updateProfile: 9176 eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function 9177 lsl r3,r3,#23 @ shift out excess 511 9178 ldrb r1,[r0,r3,lsr #23] @ get counter 9179 GET_INST_OPCODE(ip) 9180 subs r1,r1,#1 @ decrement counter 9181 strb r1,[r0,r3,lsr #23] @ and store it 9182 GOTO_OPCODE_IFNE(ip) @ if not threshold, fallthrough otherwise */ 9183 9184/* 9185 * Here, we switch to the debug interpreter to request 9186 * trace selection. First, though, check to see if there 9187 * is already a native translation in place (and, if so, 9188 * jump to it now). 9189 */ 9190 GET_JIT_THRESHOLD(r1) 9191 strb r1,[r0,r3,lsr #23] @ reset counter 9192 EXPORT_PC() 9193 mov r0,rPC 9194 bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC) 9195 mov r1, rPC @ arg1 of translation may need this 9196 mov lr, #0 @ in case target is HANDLER_INTERPRET 9197 cmp r0,#0 9198#if !defined(WITH_SELF_VERIFICATION) 9199 bxne r0 @ jump to the translation 9200#else 9201 beq common_selectTrace 9202 /* 9203 * At this point, we have a target translation. However, if 9204 * that translation is actually the interpret-only pseudo-translation 9205 * we want to treat it the same as no translation. 9206 */ 9207 mov r10, r0 @ save target 9208 bl dvmCompilerGetInterpretTemplate 9209 cmp r0, r10 @ special case? 
9210 bne dvmJitSelfVerificationStart @ set up self verification 9211 GET_INST_OPCODE(ip) 9212 GOTO_OPCODE(ip) 9213 /* no return */ 9214#endif 9215 9216common_selectTrace: 9217 mov r2,#kJitTSelectRequest @ ask for trace selection 9218 str r2,[rGLUE,#offGlue_jitState] 9219 mov r2,#kInterpEntryInstr @ normal entry reason 9220 str r2,[rGLUE,#offGlue_entryPoint] 9221 mov r1,#1 @ set changeInterp 9222 b common_gotoBail 9223 9224#if defined(WITH_SELF_VERIFICATION) 9225/* 9226 * Save PC and registers to shadow memory for self verification mode 9227 * before jumping to native translation. 9228 * On entry, r10 contains the address of the target translation. 9229 */ 9230dvmJitSelfVerificationStart: 9231 mov r0,rPC @ r0<- program counter 9232 mov r1,rFP @ r1<- frame pointer 9233 mov r2,rGLUE @ r2<- InterpState pointer 9234 mov r3,r10 @ r3<- target translation 9235 bl dvmSelfVerificationSaveState @ save registers to shadow space 9236 ldr rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space 9237 add rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space 9238 bx r10 @ jump to the translation 9239 9240/* 9241 * Restore PC, registers, and interpState to original values 9242 * before jumping back to the interpreter. 
9243 */ 9244dvmJitSelfVerificationEnd: 9245 mov r1,rFP @ pass ending fp 9246 bl dvmSelfVerificationRestoreState @ restore pc and fp values 9247 ldr rPC,[r0,#offShadowSpace_startPC] @ restore PC 9248 ldr rFP,[r0,#offShadowSpace_fp] @ restore FP 9249 ldr rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState 9250 ldr r1,[r0,#offShadowSpace_svState] @ get self verification state 9251 cmp r1,#0 @ check for punt condition 9252 beq 1f 9253 mov r2,#kJitSelfVerification @ ask for self verification 9254 str r2,[rGLUE,#offGlue_jitState] 9255 mov r2,#kInterpEntryInstr @ normal entry reason 9256 str r2,[rGLUE,#offGlue_entryPoint] 9257 mov r1,#1 @ set changeInterp 9258 b common_gotoBail 9259 92601: @ exit to interpreter without check 9261 EXPORT_PC() 9262 adrl rIBASE, dvmAsmInstructionStart 9263 FETCH_INST() 9264 GET_INST_OPCODE(ip) 9265 GOTO_OPCODE(ip) 9266#endif 9267 9268#endif 9269 9270/* 9271 * Common code when a backward branch is taken. 9272 * 9273 * On entry: 9274 * r9 is PC adjustment *in bytes* 9275 */ 9276common_backwardBranch: 9277 mov r0, #kInterpEntryInstr 9278 bl common_periodicChecks 9279#if defined(WITH_JIT) 9280 GET_JIT_PROF_TABLE(r0) 9281 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9282 cmp r0,#0 9283 bne common_updateProfile 9284 GET_INST_OPCODE(ip) 9285 GOTO_OPCODE(ip) 9286#else 9287 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 9288 GET_INST_OPCODE(ip) @ extract opcode from rINST 9289 GOTO_OPCODE(ip) @ jump to next instruction 9290#endif 9291 9292 9293/* 9294 * Need to see if the thread needs to be suspended or debugger/profiler 9295 * activity has begun. 9296 * 9297 * TODO: if JDWP isn't running, zero out pDebuggerActive pointer so we don't 9298 * have to do the second ldr. 9299 * 9300 * TODO: reduce this so we're just checking a single location. 9301 * 9302 * On entry: 9303 * r0 is reentry type, e.g. 
kInterpEntryInstr 9304 * r9 is trampoline PC adjustment *in bytes* 9305 */ 9306common_periodicChecks: 9307 ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount 9308 9309 @ speculatively store r0 before it is clobbered by dvmCheckSuspendPending 9310 str r0, [rGLUE, #offGlue_entryPoint] 9311 9312#if defined(WITH_DEBUGGER) 9313 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 9314#endif 9315#if defined(WITH_PROFILER) 9316 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 9317#endif 9318 9319 ldr r3, [r3] @ r3<- suspendCount (int) 9320 9321#if defined(WITH_DEBUGGER) 9322 ldrb r1, [r1] @ r1<- debuggerActive (boolean) 9323#endif 9324#if defined (WITH_PROFILER) 9325 ldr r2, [r2] @ r2<- activeProfilers (int) 9326#endif 9327 9328 cmp r3, #0 @ suspend pending? 9329 bne 2f @ yes, do full suspension check 9330 9331#if defined(WITH_DEBUGGER) || defined(WITH_PROFILER) 9332# if defined(WITH_DEBUGGER) && defined(WITH_PROFILER) 9333 orrs r1, r1, r2 @ r1<- r1 | r2 9334 cmp r1, #0 @ debugger attached or profiler started? 9335# elif defined(WITH_DEBUGGER) 9336 cmp r1, #0 @ debugger attached? 9337# elif defined(WITH_PROFILER) 9338 cmp r2, #0 @ profiler started? 9339# endif 9340 bne 3f @ debugger/profiler, switch interp 9341#endif 9342 9343 bx lr @ nothing to do, return 9344 93452: @ check suspend 9346 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 9347 EXPORT_PC() @ need for precise GC 9348 b dvmCheckSuspendPending @ suspend if necessary, then return 9349 93503: @ debugger/profiler enabled, bail out 9351 add rPC, rPC, r9 @ update rPC 9352 mov r1, #1 @ "want switch" = true 9353 b common_gotoBail 9354 9355 9356/* 9357 * The equivalent of "goto bail", this calls through the "bail handler". 9358 * 9359 * State registers will be saved to the "glue" area before bailing. 
9360 * 9361 * On entry: 9362 * r1 is "bool changeInterp", indicating if we want to switch to the 9363 * other interpreter or just bail all the way out 9364 */ 9365common_gotoBail: 9366 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 9367 mov r0, rGLUE @ r0<- glue ptr 9368 b dvmMterpStdBail @ call(glue, changeInterp) 9369 9370 @add r1, r1, #1 @ using (boolean+1) 9371 @add r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf 9372 @bl _longjmp @ does not return 9373 @bl common_abort 9374 9375 9376/* 9377 * Common code for method invocation with range. 9378 * 9379 * On entry: 9380 * r0 is "Method* methodToCall", the method we're trying to call 9381 */ 9382common_invokeMethodRange: 9383.LinvokeNewRange: 9384 @ prepare to copy args to "outs" area of current frame 9385 movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero 9386 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9387 beq .LinvokeArgsDone @ if no args, skip the rest 9388 FETCH(r1, 2) @ r1<- CCCC 9389 9390 @ r0=methodToCall, r1=CCCC, r2=count, r10=outs 9391 @ (very few methods have > 10 args; could unroll for common cases) 9392 add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC] 9393 sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args 9394 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 93951: ldr r1, [r3], #4 @ val = *fp++ 9396 subs r2, r2, #1 @ count-- 9397 str r1, [r10], #4 @ *outs++ = val 9398 bne 1b @ ...while count != 0 9399 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9400 b .LinvokeArgsDone 9401 9402/* 9403 * Common code for method invocation without range. 
9404 * 9405 * On entry: 9406 * r0 is "Method* methodToCall", the method we're trying to call 9407 */ 9408common_invokeMethodNoRange: 9409.LinvokeNewNoRange: 9410 @ prepare to copy args to "outs" area of current frame 9411 movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero 9412 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 9413 FETCH(r1, 2) @ r1<- GFED (load here to hide latency) 9414 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 9415 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 9416 beq .LinvokeArgsDone 9417 9418 @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs 9419.LinvokeNonRange: 9420 rsb r2, r2, #5 @ r2<- 5-r2 9421 add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each 9422 bl common_abort @ (skipped due to ARM prefetch) 94235: and ip, rINST, #0x0f00 @ isolate A 9424 ldr r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2) 9425 mov r0, r0 @ nop 9426 str r2, [r10, #-4]! @ *--outs = vA 94274: and ip, r1, #0xf000 @ isolate G 9428 ldr r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2) 9429 mov r0, r0 @ nop 9430 str r2, [r10, #-4]! @ *--outs = vG 94313: and ip, r1, #0x0f00 @ isolate F 9432 ldr r2, [rFP, ip, lsr #6] @ r2<- vF 9433 mov r0, r0 @ nop 9434 str r2, [r10, #-4]! @ *--outs = vF 94352: and ip, r1, #0x00f0 @ isolate E 9436 ldr r2, [rFP, ip, lsr #2] @ r2<- vE 9437 mov r0, r0 @ nop 9438 str r2, [r10, #-4]! @ *--outs = vE 94391: and ip, r1, #0x000f @ isolate D 9440 ldr r2, [rFP, ip, lsl #2] @ r2<- vD 9441 mov r0, r0 @ nop 9442 str r2, [r10, #-4]! 
@ *--outs = vD 94430: @ fall through to .LinvokeArgsDone 9444 9445.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize 9446 ldr r2, [r0, #offMethod_insns] @ r2<- method->insns 9447 ldr rINST, [r0, #offMethod_clazz] @ rINST<- method->clazz 9448 @ find space for the new stack frame, check for overflow 9449 SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area 9450 sub r1, r1, r9, lsl #2 @ r1<- newFp (old savearea - regsSize) 9451 SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea 9452@ bl common_dumpRegs 9453 ldr r9, [rGLUE, #offGlue_interpStackEnd] @ r9<- interpStackEnd 9454 sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize) 9455 cmp r3, r9 @ bottom < interpStackEnd? 9456 ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags 9457 blt .LstackOverflow @ yes, this frame will overflow stack 9458 9459 @ set up newSaveArea 9460#ifdef EASY_GDB 9461 SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area 9462 str ip, [r10, #offStackSaveArea_prevSave] 9463#endif 9464 str rFP, [r10, #offStackSaveArea_prevFrame] 9465 str rPC, [r10, #offStackSaveArea_savedPc] 9466#if defined(WITH_JIT) 9467 mov r9, #0 9468 str r9, [r10, #offStackSaveArea_returnAddr] 9469#endif 9470 str r0, [r10, #offStackSaveArea_method] 9471 tst r3, #ACC_NATIVE 9472 bne .LinvokeNative 9473 9474 /* 9475 stmfd sp!, {r0-r3} 9476 bl common_printNewline 9477 mov r0, rFP 9478 mov r1, #0 9479 bl dvmDumpFp 9480 ldmfd sp!, {r0-r3} 9481 stmfd sp!, {r0-r3} 9482 mov r0, r1 9483 mov r1, r10 9484 bl dvmDumpFp 9485 bl common_printNewline 9486 ldmfd sp!, {r0-r3} 9487 */ 9488 9489 ldrh r9, [r2] @ r9 <- load INST from new PC 9490 ldr r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex 9491 mov rPC, r2 @ publish new rPC 9492 ldr r2, [rGLUE, #offGlue_self] @ r2<- glue->self 9493 9494 @ Update "glue" values for the new method 9495 @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST 9496 str r0, [rGLUE, #offGlue_method] @ glue->method = methodToCall 9497 str r3, [rGLUE, 
#offGlue_methodClassDex] @ glue->methodClassDex = ... 9498#if defined(WITH_JIT) 9499 GET_JIT_PROF_TABLE(r0) 9500 mov rFP, r1 @ fp = newFp 9501 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 9502 mov rINST, r9 @ publish new rINST 9503 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 9504 cmp r0,#0 9505 bne common_updateProfile 9506 GOTO_OPCODE(ip) @ jump to next instruction 9507#else 9508 mov rFP, r1 @ fp = newFp 9509 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9 9510 mov rINST, r9 @ publish new rINST 9511 str r1, [r2, #offThread_curFrame] @ self->curFrame = newFp 9512 GOTO_OPCODE(ip) @ jump to next instruction 9513#endif 9514 9515.LinvokeNative: 9516 @ Prep for the native call 9517 @ r0=methodToCall, r1=newFp, r10=newSaveArea 9518 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 9519 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->... 9520 str r1, [r3, #offThread_curFrame] @ self->curFrame = newFp 9521 str r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top 9522 mov r9, r3 @ r9<- glue->self (preserve) 9523 9524 mov r2, r0 @ r2<- methodToCall 9525 mov r0, r1 @ r0<- newFp (points to args) 9526 add r1, rGLUE, #offGlue_retval @ r1<- &retval 9527 9528#ifdef ASSIST_DEBUGGER 9529 /* insert fake function header to help gdb find the stack frame */ 9530 b .Lskip 9531 .type dalvik_mterp, %function 9532dalvik_mterp: 9533 .fnstart 9534 MTERP_ENTRY1 9535 MTERP_ENTRY2 9536.Lskip: 9537#endif 9538 9539 @mov lr, pc @ set return addr 9540 @ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc 9541 LDR_PC_LR "[r2, #offMethod_nativeFunc]" 9542 9543 @ native return; r9=self, r10=newSaveArea 9544 @ equivalent to dvmPopJniLocals 9545 ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top 9546 ldr r1, [r9, #offThread_exception] @ check for exception 9547 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp 9548 cmp r1, #0 @ null? 
9549 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top 9550 bne common_exceptionThrown @ no, handle exception 9551 9552 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 9553 GET_INST_OPCODE(ip) @ extract opcode from rINST 9554 GOTO_OPCODE(ip) @ jump to next instruction 9555 9556.LstackOverflow: @ r0=methodToCall 9557 mov r1, r0 @ r1<- methodToCall 9558 ldr r0, [rGLUE, #offGlue_self] @ r0<- self 9559 bl dvmHandleStackOverflow 9560 b common_exceptionThrown 9561#ifdef ASSIST_DEBUGGER 9562 .fnend 9563#endif 9564 9565 9566 /* 9567 * Common code for method invocation, calling through "glue code". 9568 * 9569 * TODO: now that we have range and non-range invoke handlers, this 9570 * needs to be split into two. Maybe just create entry points 9571 * that set r9 and jump here? 9572 * 9573 * On entry: 9574 * r0 is "Method* methodToCall", the method we're trying to call 9575 * r9 is "bool methodCallRange", indicating if this is a /range variant 9576 */ 9577 .if 0 9578.LinvokeOld: 9579 sub sp, sp, #8 @ space for args + pad 9580 FETCH(ip, 2) @ ip<- FEDC or CCCC 9581 mov r2, r0 @ A2<- methodToCall 9582 mov r0, rGLUE @ A0<- glue 9583 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 9584 mov r1, r9 @ A1<- methodCallRange 9585 mov r3, rINST, lsr #8 @ A3<- AA 9586 str ip, [sp, #0] @ A4<- ip 9587 bl dvmMterp_invokeMethod @ call the C invokeMethod 9588 add sp, sp, #8 @ remove arg area 9589 b common_resumeAfterGlueCall @ continue to next instruction 9590 .endif 9591 9592 9593 9594/* 9595 * Common code for handling a return instruction. 9596 * 9597 * This does not return. 
9598 */ 9599common_returnFromMethod: 9600.LreturnNew: 9601 mov r0, #kInterpEntryReturn 9602 mov r9, #0 9603 bl common_periodicChecks 9604 9605 SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old) 9606 ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame 9607 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc 9608 ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)] 9609 @ r2<- method we're returning to 9610 ldr r3, [rGLUE, #offGlue_self] @ r3<- glue->self 9611 cmp r2, #0 @ is this a break frame? 9612 ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz 9613 mov r1, #0 @ "want switch" = false 9614 beq common_gotoBail @ break frame, bail out completely 9615 9616 PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST 9617 str r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method 9618 ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex 9619 str rFP, [r3, #offThread_curFrame] @ self->curFrame = fp 9620#if defined(WITH_JIT) 9621 ldr r3, [r0, #offStackSaveArea_returnAddr] @ r3 = saveArea->returnAddr 9622 GET_JIT_PROF_TABLE(r0) 9623 mov rPC, r9 @ publish new rPC 9624 str r1, [rGLUE, #offGlue_methodClassDex] 9625 cmp r3, #0 @ caller is compiled code 9626 blxne r3 9627 GET_INST_OPCODE(ip) @ extract opcode from rINST 9628 cmp r0,#0 9629 bne common_updateProfile 9630 GOTO_OPCODE(ip) @ jump to next instruction 9631#else 9632 GET_INST_OPCODE(ip) @ extract opcode from rINST 9633 mov rPC, r9 @ publish new rPC 9634 str r1, [rGLUE, #offGlue_methodClassDex] 9635 GOTO_OPCODE(ip) @ jump to next instruction 9636#endif 9637 9638 /* 9639 * Return handling, calls through "glue code". 9640 */ 9641 .if 0 9642.LreturnOld: 9643 SAVE_PC_FP_TO_GLUE() @ export state 9644 mov r0, rGLUE @ arg to function 9645 bl dvmMterp_returnFromMethod 9646 b common_resumeAfterGlueCall 9647 .endif 9648 9649 9650/* 9651 * Somebody has thrown an exception. Handle it. 
9652 * 9653 * If the exception processing code returns to us (instead of falling 9654 * out of the interpreter), continue with whatever the next instruction 9655 * now happens to be. 9656 * 9657 * This does not return. 9658 */ 9659 .global dvmMterpCommonExceptionThrown 9660dvmMterpCommonExceptionThrown: 9661common_exceptionThrown: 9662.LexceptionNew: 9663 mov r0, #kInterpEntryThrow 9664 mov r9, #0 9665 bl common_periodicChecks 9666 9667#if defined(WITH_JIT) 9668 mov r2,#kJitTSelectAbort @ abandon trace selection in progress 9669 str r2,[rGLUE,#offGlue_jitState] 9670#endif 9671 9672 ldr r10, [rGLUE, #offGlue_self] @ r10<- glue->self 9673 ldr r9, [r10, #offThread_exception] @ r9<- self->exception 9674 mov r1, r10 @ r1<- self 9675 mov r0, r9 @ r0<- exception 9676 bl dvmAddTrackedAlloc @ don't let the exception be GCed 9677 mov r3, #0 @ r3<- NULL 9678 str r3, [r10, #offThread_exception] @ self->exception = NULL 9679 9680 /* set up args and a local for "&fp" */ 9681 /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */ 9682 str rFP, [sp, #-4]! @ *--sp = fp 9683 mov ip, sp @ ip<- &fp 9684 mov r3, #0 @ r3<- false 9685 str ip, [sp, #-4]! @ *--sp = &fp 9686 ldr r1, [rGLUE, #offGlue_method] @ r1<- glue->method 9687 mov r0, r10 @ r0<- self 9688 ldr r1, [r1, #offMethod_insns] @ r1<- method->insns 9689 mov r2, r9 @ r2<- exception 9690 sub r1, rPC, r1 @ r1<- pc - method->insns 9691 mov r1, r1, asr #1 @ r1<- offset in code units 9692 9693 /* call, r0 gets catchRelPc (a code-unit offset) */ 9694 bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp) 9695 9696 /* fix earlier stack overflow if necessary; may trash rFP */ 9697 ldrb r1, [r10, #offThread_stackOverflowed] 9698 cmp r1, #0 @ did we overflow earlier? 
9699 beq 1f @ no, skip ahead 9700 mov rFP, r0 @ save relPc result in rFP 9701 mov r0, r10 @ r0<- self 9702 bl dvmCleanupStackOverflow @ call(self) 9703 mov r0, rFP @ restore result 97041: 9705 9706 /* update frame pointer and check result from dvmFindCatchBlock */ 9707 ldr rFP, [sp, #4] @ retrieve the updated rFP 9708 cmp r0, #0 @ is catchRelPc < 0? 9709 add sp, sp, #8 @ restore stack 9710 bmi .LnotCaughtLocally 9711 9712 /* adjust locals to match self->curFrame and updated PC */ 9713 SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area 9714 ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method 9715 str r1, [rGLUE, #offGlue_method] @ glue->method = new method 9716 ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz 9717 ldr r3, [r1, #offMethod_insns] @ r3<- method->insns 9718 ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex 9719 add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc 9720 str r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth... 9721 9722 /* release the tracked alloc on the exception */ 9723 mov r0, r9 @ r0<- exception 9724 mov r1, r10 @ r1<- self 9725 bl dvmReleaseTrackedAlloc @ release the exception 9726 9727 /* restore the exception if the handler wants it */ 9728 FETCH_INST() @ load rINST from rPC 9729 GET_INST_OPCODE(ip) @ extract opcode from rINST 9730 cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"? 9731 streq r9, [r10, #offThread_exception] @ yes, restore the exception 9732 GOTO_OPCODE(ip) @ jump to next instruction 9733 9734.LnotCaughtLocally: @ r9=exception, r10=self 9735 /* fix stack overflow if necessary */ 9736 ldrb r1, [r10, #offThread_stackOverflowed] 9737 cmp r1, #0 @ did we overflow earlier? 9738 movne r0, r10 @ if yes: r0<- self 9739 blne dvmCleanupStackOverflow @ if yes: call(self) 9740 9741 @ may want to show "not caught locally" debug messages here 9742#if DVM_SHOW_EXCEPTION >= 2 9743 /* call __android_log_print(prio, tag, format, ...) 
*/ 9744 /* "Exception %s from %s:%d not caught locally" */ 9745 @ dvmLineNumFromPC(method, pc - method->insns) 9746 ldr r0, [rGLUE, #offGlue_method] 9747 ldr r1, [r0, #offMethod_insns] 9748 sub r1, rPC, r1 9749 asr r1, r1, #1 9750 bl dvmLineNumFromPC 9751 str r0, [sp, #-4]! 9752 @ dvmGetMethodSourceFile(method) 9753 ldr r0, [rGLUE, #offGlue_method] 9754 bl dvmGetMethodSourceFile 9755 str r0, [sp, #-4]! 9756 @ exception->clazz->descriptor 9757 ldr r3, [r9, #offObject_clazz] 9758 ldr r3, [r3, #offClassObject_descriptor] 9759 @ 9760 ldr r2, strExceptionNotCaughtLocally 9761 ldr r1, strLogTag 9762 mov r0, #3 @ LOG_DEBUG 9763 bl __android_log_print 9764#endif 9765 str r9, [r10, #offThread_exception] @ restore exception 9766 mov r0, r9 @ r0<- exception 9767 mov r1, r10 @ r1<- self 9768 bl dvmReleaseTrackedAlloc @ release the exception 9769 mov r1, #0 @ "want switch" = false 9770 b common_gotoBail @ bail out 9771 9772 9773 /* 9774 * Exception handling, calls through "glue code". 9775 */ 9776 .if 0 9777.LexceptionOld: 9778 SAVE_PC_FP_TO_GLUE() @ export state 9779 mov r0, rGLUE @ arg to function 9780 bl dvmMterp_exceptionThrown 9781 b common_resumeAfterGlueCall 9782 .endif 9783 9784 9785/* 9786 * After returning from a "glued" function, pull out the updated 9787 * values and start executing at the next instruction. 9788 */ 9789common_resumeAfterGlueCall: 9790 LOAD_PC_FP_FROM_GLUE() @ pull rPC and rFP out of glue 9791 FETCH_INST() @ load rINST from rPC 9792 GET_INST_OPCODE(ip) @ extract opcode from rINST 9793 GOTO_OPCODE(ip) @ jump to next instruction 9794 9795/* 9796 * Invalid array index. 9797 */ 9798common_errArrayIndex: 9799 EXPORT_PC() 9800 ldr r0, strArrayIndexException 9801 mov r1, #0 9802 bl dvmThrowException 9803 b common_exceptionThrown 9804 9805/* 9806 * Invalid array value. 
9807 */ 9808common_errArrayStore: 9809 EXPORT_PC() 9810 ldr r0, strArrayStoreException 9811 mov r1, #0 9812 bl dvmThrowException 9813 b common_exceptionThrown 9814 9815/* 9816 * Integer divide or mod by zero. 9817 */ 9818common_errDivideByZero: 9819 EXPORT_PC() 9820 ldr r0, strArithmeticException 9821 ldr r1, strDivideByZero 9822 bl dvmThrowException 9823 b common_exceptionThrown 9824 9825/* 9826 * Attempt to allocate an array with a negative size. 9827 */ 9828common_errNegativeArraySize: 9829 EXPORT_PC() 9830 ldr r0, strNegativeArraySizeException 9831 mov r1, #0 9832 bl dvmThrowException 9833 b common_exceptionThrown 9834 9835/* 9836 * Invocation of a non-existent method. 9837 */ 9838common_errNoSuchMethod: 9839 EXPORT_PC() 9840 ldr r0, strNoSuchMethodError 9841 mov r1, #0 9842 bl dvmThrowException 9843 b common_exceptionThrown 9844 9845/* 9846 * We encountered a null object when we weren't expecting one. We 9847 * export the PC, throw a NullPointerException, and goto the exception 9848 * processing code. 9849 */ 9850common_errNullObject: 9851 EXPORT_PC() 9852 ldr r0, strNullPointerException 9853 mov r1, #0 9854 bl dvmThrowException 9855 b common_exceptionThrown 9856 9857/* 9858 * For debugging, cause an immediate fault. The source address will 9859 * be in lr (use a bl instruction to jump here). 9860 */ 9861common_abort: 9862 ldr pc, .LdeadFood 9863.LdeadFood: 9864 .word 0xdeadf00d 9865 9866/* 9867 * Spit out a "we were here", preserving all registers. (The attempt 9868 * to save ip won't work, but we need to save an even number of 9869 * registers for EABI 64-bit stack alignment.) 9870 */ 9871 .macro SQUEAK num 9872common_squeak\num: 9873 stmfd sp!, {r0, r1, r2, r3, ip, lr} 9874 ldr r0, strSqueak 9875 mov r1, #\num 9876 bl printf 9877 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 9878 bx lr 9879 .endm 9880 9881 SQUEAK 0 9882 SQUEAK 1 9883 SQUEAK 2 9884 SQUEAK 3 9885 SQUEAK 4 9886 SQUEAK 5 9887 9888/* 9889 * Spit out the number in r0, preserving registers. 
9890 */ 9891common_printNum: 9892 stmfd sp!, {r0, r1, r2, r3, ip, lr} 9893 mov r1, r0 9894 ldr r0, strSqueak 9895 bl printf 9896 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 9897 bx lr 9898 9899/* 9900 * Print a newline, preserving registers. 9901 */ 9902common_printNewline: 9903 stmfd sp!, {r0, r1, r2, r3, ip, lr} 9904 ldr r0, strNewline 9905 bl printf 9906 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 9907 bx lr 9908 9909 /* 9910 * Print the 32-bit quantity in r0 as a hex value, preserving registers. 9911 */ 9912common_printHex: 9913 stmfd sp!, {r0, r1, r2, r3, ip, lr} 9914 mov r1, r0 9915 ldr r0, strPrintHex 9916 bl printf 9917 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 9918 bx lr 9919 9920/* 9921 * Print the 64-bit quantity in r0-r1, preserving registers. 9922 */ 9923common_printLong: 9924 stmfd sp!, {r0, r1, r2, r3, ip, lr} 9925 mov r3, r1 9926 mov r2, r0 9927 ldr r0, strPrintLong 9928 bl printf 9929 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 9930 bx lr 9931 9932/* 9933 * Print full method info. Pass the Method* in r0. Preserves regs. 9934 */ 9935common_printMethod: 9936 stmfd sp!, {r0, r1, r2, r3, ip, lr} 9937 bl dvmMterpPrintMethod 9938 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 9939 bx lr 9940 9941/* 9942 * Call a C helper function that dumps regs and possibly some 9943 * additional info. Requires the C function to be compiled in. 9944 */ 9945 .if 0 9946common_dumpRegs: 9947 stmfd sp!, {r0, r1, r2, r3, ip, lr} 9948 bl dvmMterpDumpArmRegs 9949 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 9950 bx lr 9951 .endif 9952 9953#if 0 9954/* 9955 * Experiment on VFP mode. 9956 * 9957 * uint32_t setFPSCR(uint32_t val, uint32_t mask) 9958 * 9959 * Updates the bits specified by "mask", setting them to the values in "val". 
9960 */ 9961setFPSCR: 9962 and r0, r0, r1 @ make sure no stray bits are set 9963 fmrx r2, fpscr @ get VFP reg 9964 mvn r1, r1 @ bit-invert mask 9965 and r2, r2, r1 @ clear masked bits 9966 orr r2, r2, r0 @ set specified bits 9967 fmxr fpscr, r2 @ set VFP reg 9968 mov r0, r2 @ return new value 9969 bx lr 9970 9971 .align 2 9972 .global dvmConfigureFP 9973 .type dvmConfigureFP, %function 9974dvmConfigureFP: 9975 stmfd sp!, {ip, lr} 9976 /* 0x03000000 sets DN/FZ */ 9977 /* 0x00009f00 clears the six exception enable flags */ 9978 bl common_squeak0 9979 mov r0, #0x03000000 @ r0<- 0x03000000 9980 add r1, r0, #0x9f00 @ r1<- 0x03009f00 9981 bl setFPSCR 9982 ldmfd sp!, {ip, pc} 9983#endif 9984 9985 9986/* 9987 * String references, must be close to the code that uses them. 9988 */ 9989 .align 2 9990strArithmeticException: 9991 .word .LstrArithmeticException 9992strArrayIndexException: 9993 .word .LstrArrayIndexException 9994strArrayStoreException: 9995 .word .LstrArrayStoreException 9996strDivideByZero: 9997 .word .LstrDivideByZero 9998strNegativeArraySizeException: 9999 .word .LstrNegativeArraySizeException 10000strNoSuchMethodError: 10001 .word .LstrNoSuchMethodError 10002strNullPointerException: 10003 .word .LstrNullPointerException 10004 10005strLogTag: 10006 .word .LstrLogTag 10007strExceptionNotCaughtLocally: 10008 .word .LstrExceptionNotCaughtLocally 10009 10010strNewline: 10011 .word .LstrNewline 10012strSqueak: 10013 .word .LstrSqueak 10014strPrintHex: 10015 .word .LstrPrintHex 10016strPrintLong: 10017 .word .LstrPrintLong 10018 10019/* 10020 * Zero-terminated ASCII string data. 10021 * 10022 * On ARM we have two choices: do like gcc does, and LDR from a .word 10023 * with the address, or use an ADR pseudo-op to get the address 10024 * directly. ADR saves 4 bytes and an indirection, but it's using a 10025 * PC-relative addressing mode and hence has a limited range, which 10026 * makes it not work well with mergeable string sections. 
10027 */ 10028 .section .rodata.str1.4,"aMS",%progbits,1 10029 10030.LstrBadEntryPoint: 10031 .asciz "Bad entry point %d\n" 10032.LstrArithmeticException: 10033 .asciz "Ljava/lang/ArithmeticException;" 10034.LstrArrayIndexException: 10035 .asciz "Ljava/lang/ArrayIndexOutOfBoundsException;" 10036.LstrArrayStoreException: 10037 .asciz "Ljava/lang/ArrayStoreException;" 10038.LstrClassCastException: 10039 .asciz "Ljava/lang/ClassCastException;" 10040.LstrDivideByZero: 10041 .asciz "divide by zero" 10042.LstrFilledNewArrayNotImpl: 10043 .asciz "filled-new-array only implemented for objects and 'int'" 10044.LstrInternalError: 10045 .asciz "Ljava/lang/InternalError;" 10046.LstrInstantiationError: 10047 .asciz "Ljava/lang/InstantiationError;" 10048.LstrNegativeArraySizeException: 10049 .asciz "Ljava/lang/NegativeArraySizeException;" 10050.LstrNoSuchMethodError: 10051 .asciz "Ljava/lang/NoSuchMethodError;" 10052.LstrNullPointerException: 10053 .asciz "Ljava/lang/NullPointerException;" 10054 10055.LstrLogTag: 10056 .asciz "mterp" 10057.LstrExceptionNotCaughtLocally: 10058 .asciz "Exception %s from %s:%d not caught locally\n" 10059 10060.LstrNewline: 10061 .asciz "\n" 10062.LstrSqueak: 10063 .asciz "<%d>" 10064.LstrPrintHex: 10065 .asciz "<0x%x>" 10066.LstrPrintLong: 10067 .asciz "<%lld>" 10068 10069 10070